Commit a9d23e71 authored by David Hildenbrand, committed by Christian Borntraeger

s390/mm: shadow pages with real guest requested protection

We really want to avoid manually handling protection for nested
virtualization. By shadowing pages with the protection the guest asked us
for, the SIE can handle most protection-related actions for us (e.g.
special handling for MVPG) and we can directly forward protection
exceptions to the guest.

PTEs will now always be shadowed with the correct _PAGE_PROTECT flag.
Unshadowing will take care of any guest changes to the parent PTE and
any host changes to the host PTE. If the host PTE lacks the required
access rights or is not available, we have to fix it up.

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent eea3678d
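
Before the diff, a minimal user-space sketch of the idea (our illustration, not the kernel code: make_shadow_pte() is a made-up name, the #define values mirror the s390 pte bits): the shadow PTE takes its page frame from the host mapping and its _PAGE_PROTECT bit from the PTE the guest requested; if the host mapping is missing or more restrictive than what the guest wants, shadowing has to wait for a fix-up.

	#include <stdio.h>

	#define PAGE_MASK	(~0xfffUL)	/* 4 KiB pages */
	#define _PAGE_INVALID	0x400UL		/* s390 pte invalid bit */
	#define _PAGE_PROTECT	0x200UL		/* s390 pte protect bit */

	/* Combine the host (source) pte with the guest-requested pte:
	 * frame from the host, protection from the guest. Returns -1
	 * when the host mapping must be fixed up before shadowing. */
	static int make_shadow_pte(unsigned long spte, unsigned long gpte,
				   unsigned long *tpte)
	{
		if (spte & _PAGE_INVALID)
			return -1;	/* host page not mapped yet */
		if ((spte & _PAGE_PROTECT) && !(gpte & _PAGE_PROTECT))
			return -1;	/* guest wants r/w, host is r/o */
		*tpte = (spte & PAGE_MASK) | (gpte & _PAGE_PROTECT);
		return 0;
	}

	int main(void)
	{
		unsigned long tpte;

		/* host page r/w at frame 0x5000, guest asked for r/o */
		if (!make_shadow_pte(0x5000, 0x9000 | _PAGE_PROTECT, &tpte))
			printf("shadow pte: %#lx\n", tpte);	/* prints 0x5200 */
		return 0;
	}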
arch/s390/include/asm/gmap.h
@@ -110,8 +110,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt);
 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt);
 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
 			   unsigned long *pgt, int *dat_protection);
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
-		     unsigned long paddr, int write);
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
 void gmap_register_pte_notifier(struct gmap_notifier *);
 void gmap_unregister_pte_notifier(struct gmap_notifier *);
arch/s390/include/asm/pgtable.h
@@ -895,7 +895,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep , int reset);
 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
-		    pte_t *sptep, pte_t *tptep, int write);
+		    pte_t *sptep, pte_t *tptep, pte_t pte);
 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
arch/s390/kvm/gaccess.c
@@ -1109,7 +1109,7 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
 	dat_protection |= pte.p;
 	if (write && dat_protection)
 		return PGM_PROTECTION;
-	rc = gmap_shadow_page(sg, saddr, pte.pfra * 4096, write);
+	rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
 	if (rc)
 		return rc;
 	return 0;
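
A note on the new call site (our annotation, not part of the patch): pte here is the guest DAT pte produced by the shadow table walk, and the guest's pte layout matches the host's hardware pte bits, so wrapping the raw value with __pte() forwards both pieces of information the old (paddr, write) pair had to encode separately:

	/*
	 * pte.val carries the page-frame real address (pte.pfra, i.e.
	 * pte.val >> 12) and the protection bit (pte.p, i.e. _PAGE_PROTECT).
	 * The callee recovers both:
	 *   paddr = pte_val(pte) & PAGE_MASK;
	 *   prot  = pte_val(pte) & _PAGE_PROTECT;
	 */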
arch/s390/mm/gmap.c
@@ -1743,8 +1743,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
  * gmap_shadow_page - create a shadow page mapping
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
- * @paddr: parent gmap address to get mapped at @saddr
- * @write: =1 map r/w, =0 map r/o
+ * @pte: pte in parent gmap address space to get shadowed
  *
  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  * shadow table structure is incomplete, -ENOMEM if out of memory and
@@ -1752,12 +1751,11 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
  *
  * Called with sg->mm->mmap_sem in read.
  */
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
-		     unsigned long paddr, int write)
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 {
 	struct gmap *parent;
 	struct gmap_rmap *rmap;
-	unsigned long vmaddr;
+	unsigned long vmaddr, paddr;
 	spinlock_t *ptl;
 	pte_t *sptep, *tptep;
 	int rc;
@@ -1771,6 +1769,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
 	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
 	while (1) {
+		paddr = pte_val(pte) & PAGE_MASK;
 		vmaddr = __gmap_translate(parent, paddr);
 		if (IS_ERR_VALUE(vmaddr)) {
 			rc = vmaddr;
@@ -1791,8 +1790,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
 			radix_tree_preload_end();
 			break;
 		}
-		rc = ptep_shadow_pte(sg->mm, saddr,
-				     sptep, tptep, write);
+		rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
 		if (rc > 0) {
 			/* Success and a new mapping */
 			gmap_insert_rmap(sg, vmaddr, rmap);
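
The paddr computation moves inside the retry loop because the parent address is now derived from the guest pte rather than passed in. The resulting control flow of gmap_shadow_page(), sketched as an orientation aid (error paths omitted, not the verbatim kernel code):

	/*
	 * while (1):
	 *   paddr = pte_val(pte) & PAGE_MASK;     parent address from guest pte
	 *   translate paddr, walk to sptep/tptep;
	 *   rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
	 *   rc > 0   -> record the rmap, done (new shadow mapping)
	 *   rc == 0  -> done (already shadowed)
	 *   -EAGAIN  -> fix up the host mapping, retry
	 */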
arch/s390/mm/pgtable.c
@@ -463,29 +463,27 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 }
 
 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
-		    pte_t *sptep, pte_t *tptep, int write)
+		    pte_t *sptep, pte_t *tptep, pte_t pte)
 {
 	pgste_t spgste, tpgste;
 	pte_t spte, tpte;
 	int rc = -EAGAIN;
 
+	if (!(pte_val(*tptep) & _PAGE_INVALID))
+		return 0;	/* already shadowed */
 	spgste = pgste_get_lock(sptep);
 	spte = *sptep;
 	if (!(pte_val(spte) & _PAGE_INVALID) &&
-	    !(pte_val(spte) & _PAGE_PROTECT)) {
-		rc = 0;
-		if (!(pte_val(*tptep) & _PAGE_INVALID))
-			/* Update existing mapping */
-			ptep_flush_direct(mm, saddr, tptep);
-		else
-			rc = 1;
+	    !((pte_val(spte) & _PAGE_PROTECT) &&
+	      !(pte_val(pte) & _PAGE_PROTECT))) {
 		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 		tpgste = pgste_get_lock(tptep);
 		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
-			(write ? 0 : _PAGE_PROTECT);
+			(pte_val(pte) & _PAGE_PROTECT);
 		/* don't touch the storage key - it belongs to parent pgste */
 		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 		pgste_set_unlock(tptep, tpgste);
+		rc = 1;
 	}
 	pgste_set_unlock(sptep, spgste);
 	return rc;
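
As a reading aid (not part of the commit), the return-value contract of ptep_shadow_pte() after this change:

	/*
	 * *tptep already valid                  ->  0       already shadowed
	 * *sptep invalid (host page not mapped) -> -EAGAIN  caller faults it in
	 * *sptep r/o but guest requested r/w    -> -EAGAIN  caller fixes up protection
	 * otherwise                             ->  1       new shadow mapping created
	 *
	 * On -EAGAIN, gmap_shadow_page() resolves the host mapping and retries.
	 */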