Commit 6ea427bb authored by Martin Schwidefsky, committed by Christian Borntraeger

s390/mm: add reference counter to gmap structure

Let's use a reference counter mechanism to control the lifetime of
gmap structures. This will be needed for further changes related to
gmap shadows.
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent b2d73b2a
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
* @guest_to_host: radix tree with guest to host address translation * @guest_to_host: radix tree with guest to host address translation
* @host_to_guest: radix tree with pointer to segment table entries * @host_to_guest: radix tree with pointer to segment table entries
* @guest_table_lock: spinlock to protect all entries in the guest page table * @guest_table_lock: spinlock to protect all entries in the guest page table
* @ref_count: reference counter for the gmap structure
* @table: pointer to the page directory * @table: pointer to the page directory
* @asce: address space control element for gmap page table * @asce: address space control element for gmap page table
* @pfault_enabled: defines if pfaults are applicable for the guest * @pfault_enabled: defines if pfaults are applicable for the guest
...@@ -26,6 +27,7 @@ struct gmap { ...@@ -26,6 +27,7 @@ struct gmap {
struct radix_tree_root guest_to_host; struct radix_tree_root guest_to_host;
struct radix_tree_root host_to_guest; struct radix_tree_root host_to_guest;
spinlock_t guest_table_lock; spinlock_t guest_table_lock;
atomic_t ref_count;
unsigned long *table; unsigned long *table;
unsigned long asce; unsigned long asce;
unsigned long asce_end; unsigned long asce_end;
...@@ -44,8 +46,11 @@ struct gmap_notifier { ...@@ -44,8 +46,11 @@ struct gmap_notifier {
unsigned long end); unsigned long end);
}; };
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit); struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap); void gmap_remove(struct gmap *gmap);
struct gmap *gmap_get(struct gmap *gmap);
void gmap_put(struct gmap *gmap);
void gmap_enable(struct gmap *gmap); void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap); void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from, int gmap_map_segment(struct gmap *gmap, unsigned long from,
......
...@@ -532,20 +532,20 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att ...@@ -532,20 +532,20 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
if (!new_limit) if (!new_limit)
return -EINVAL; return -EINVAL;
/* gmap_alloc takes last usable address */ /* gmap_create takes last usable address */
if (new_limit != KVM_S390_NO_MEM_LIMIT) if (new_limit != KVM_S390_NO_MEM_LIMIT)
new_limit -= 1; new_limit -= 1;
ret = -EBUSY; ret = -EBUSY;
mutex_lock(&kvm->lock); mutex_lock(&kvm->lock);
if (!kvm->created_vcpus) { if (!kvm->created_vcpus) {
/* gmap_alloc will round the limit up */ /* gmap_create will round the limit up */
struct gmap *new = gmap_alloc(current->mm, new_limit); struct gmap *new = gmap_create(current->mm, new_limit);
if (!new) { if (!new) {
ret = -ENOMEM; ret = -ENOMEM;
} else { } else {
gmap_free(kvm->arch.gmap); gmap_remove(kvm->arch.gmap);
new->private = kvm; new->private = kvm;
kvm->arch.gmap = new; kvm->arch.gmap = new;
ret = 0; ret = 0;
...@@ -1394,7 +1394,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ...@@ -1394,7 +1394,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
else else
kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE, kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
sclp.hamax + 1); sclp.hamax + 1);
kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1); kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
if (!kvm->arch.gmap) if (!kvm->arch.gmap)
goto out_err; goto out_err;
kvm->arch.gmap->private = kvm; kvm->arch.gmap->private = kvm;
...@@ -1427,7 +1427,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) ...@@ -1427,7 +1427,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
sca_del_vcpu(vcpu); sca_del_vcpu(vcpu);
if (kvm_is_ucontrol(vcpu->kvm)) if (kvm_is_ucontrol(vcpu->kvm))
gmap_free(vcpu->arch.gmap); gmap_remove(vcpu->arch.gmap);
if (vcpu->kvm->arch.use_cmma) if (vcpu->kvm->arch.use_cmma)
kvm_s390_vcpu_unsetup_cmma(vcpu); kvm_s390_vcpu_unsetup_cmma(vcpu);
...@@ -1460,7 +1460,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) ...@@ -1460,7 +1460,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
debug_unregister(kvm->arch.dbf); debug_unregister(kvm->arch.dbf);
free_page((unsigned long)kvm->arch.sie_page2); free_page((unsigned long)kvm->arch.sie_page2);
if (!kvm_is_ucontrol(kvm)) if (!kvm_is_ucontrol(kvm))
gmap_free(kvm->arch.gmap); gmap_remove(kvm->arch.gmap);
kvm_s390_destroy_adapters(kvm); kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm); kvm_s390_clear_float_irqs(kvm);
KVM_EVENT(3, "vm 0x%pK destroyed", kvm); KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
...@@ -1469,7 +1469,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) ...@@ -1469,7 +1469,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
/* Section: vcpu related */ /* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{ {
vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); vcpu->arch.gmap = gmap_create(current->mm, -1UL);
if (!vcpu->arch.gmap) if (!vcpu->arch.gmap)
return -ENOMEM; return -ENOMEM;
vcpu->arch.gmap->private = vcpu->kvm; vcpu->arch.gmap->private = vcpu->kvm;
......
...@@ -21,13 +21,13 @@ ...@@ -21,13 +21,13 @@
#include <asm/tlb.h> #include <asm/tlb.h>
/** /**
* gmap_alloc - allocate a guest address space * gmap_alloc - allocate and initialize a guest address space
* @mm: pointer to the parent mm_struct * @mm: pointer to the parent mm_struct
* @limit: maximum address of the gmap address space * @limit: maximum address of the gmap address space
* *
* Returns a guest address space structure. * Returns a guest address space structure.
*/ */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) static struct gmap *gmap_alloc(unsigned long limit)
{ {
struct gmap *gmap; struct gmap *gmap;
struct page *page; struct page *page;
...@@ -58,7 +58,7 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) ...@@ -58,7 +58,7 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL); INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC); INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
spin_lock_init(&gmap->guest_table_lock); spin_lock_init(&gmap->guest_table_lock);
gmap->mm = mm; atomic_set(&gmap->ref_count, 1);
page = alloc_pages(GFP_KERNEL, 2); page = alloc_pages(GFP_KERNEL, 2);
if (!page) if (!page)
goto out_free; goto out_free;
...@@ -70,9 +70,6 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) ...@@ -70,9 +70,6 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
gmap->asce = atype | _ASCE_TABLE_LENGTH | gmap->asce = atype | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | __pa(table); _ASCE_USER_BITS | __pa(table);
gmap->asce_end = limit; gmap->asce_end = limit;
spin_lock(&mm->context.gmap_lock);
list_add_rcu(&gmap->list, &mm->context.gmap_list);
spin_unlock(&mm->context.gmap_lock);
return gmap; return gmap;
out_free: out_free:
...@@ -80,7 +77,28 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) ...@@ -80,7 +77,28 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
out: out:
return NULL; return NULL;
} }
EXPORT_SYMBOL_GPL(gmap_alloc);
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Allocates a fresh gmap and links it onto the per-mm gmap list.
 *
 * Returns a guest address space structure, or NULL on allocation failure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *new;

	new = gmap_alloc(limit);
	if (!new)
		return NULL;
	new->mm = mm;
	/* Publish the new gmap on the per-mm list; walkers use RCU. */
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&new->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_create);
static void gmap_flush_tlb(struct gmap *gmap) static void gmap_flush_tlb(struct gmap *gmap)
{ {
...@@ -118,21 +136,10 @@ static void gmap_radix_tree_free(struct radix_tree_root *root) ...@@ -118,21 +136,10 @@ static void gmap_radix_tree_free(struct radix_tree_root *root)
* gmap_free - free a guest address space * gmap_free - free a guest address space
* @gmap: pointer to the guest address space structure * @gmap: pointer to the guest address space structure
*/ */
void gmap_free(struct gmap *gmap) static void gmap_free(struct gmap *gmap)
{ {
struct page *page, *next; struct page *page, *next;
/* Flush tlb. */
if (MACHINE_HAS_IDTE)
__tlb_flush_asce(gmap->mm, gmap->asce);
else
__tlb_flush_global();
spin_lock(&gmap->mm->context.gmap_lock);
list_del_rcu(&gmap->list);
spin_unlock(&gmap->mm->context.gmap_lock);
synchronize_rcu();
/* Free all segment & region tables. */ /* Free all segment & region tables. */
list_for_each_entry_safe(page, next, &gmap->crst_list, lru) list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
__free_pages(page, 2); __free_pages(page, 2);
...@@ -140,7 +147,50 @@ void gmap_free(struct gmap *gmap) ...@@ -140,7 +147,50 @@ void gmap_free(struct gmap *gmap)
gmap_radix_tree_free(&gmap->host_to_guest); gmap_radix_tree_free(&gmap->host_to_guest);
kfree(gmap); kfree(gmap);
} }
EXPORT_SYMBOL_GPL(gmap_free);
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * The caller is expected to already hold a valid reference to @gmap
 * (otherwise the structure could be freed concurrently by gmap_put()).
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	int refs_left;

	refs_left = atomic_dec_return(&gmap->ref_count);
	if (refs_left == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 *
 * Flushes the TLB, unlinks the gmap from the per-mm list and drops the
 * initial reference; the structure is freed once the last reference
 * is put via gmap_put().
 */
void gmap_remove(struct gmap *gmap)
{
	/* Flush tlb of the gmap address space before tearing it down. */
	gmap_flush_tlb(gmap);
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	/* Wait for concurrent RCU walkers of the gmap list to finish. */
	synchronize_rcu();
	/* Put the initial reference taken at creation time */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/** /**
* gmap_enable - switch primary space to the guest address space * gmap_enable - switch primary space to the guest address space
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment