Commit da3f7ca3 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390 features and fixes for 4.5 (kvm/next)

Some small cleanups:
- use assignment instead of memcpy
- use %pK for kernel pointers

Changes regarding guest memory size:
- Fix an off-by-one error in our guest memory interface (we might
  use unnecessarily big page tables, e.g. 3 levels for a 2 GB guest
  instead of 2 levels; see the sketch below)
- We now ask the machine about the maximum supported guest address
  and limit the guest memory size accordingly.
parents 46014634 32e6b236
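
To make the off-by-one concrete: gmap_alloc() takes the last usable guest address, and the number of page-table levels is chosen from that address. A minimal sketch of the level selection, with cut-offs taken from the rounding documented in the first hunk below (2048 MB, 4096 GB, 8192 TB); the helper is illustrative, not kernel code:

/* Illustrative helper, not kernel code: pick the number of page-table
 * levels from the last usable guest address.
 */
static int levels_for_last_address(unsigned long last)
{
	if (last < (1UL << 31))		/* below 2048 MB: 2 levels */
		return 2;
	if (last < (1UL << 42))		/* below 4096 GB: 3 levels */
		return 3;
	if (last < (1UL << 53))		/* below 8192 TB: 4 levels */
		return 4;
	return 5;			/* full 64-bit address space */
}
/* A 2 GB guest: levels_for_last_address((1UL << 31) - 1) == 2, but
 * passing the size, levels_for_last_address(1UL << 31), gives 3.
 * That is the off-by-one this series fixes.
 */
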
@@ -37,7 +37,8 @@ Returns: -EFAULT if the given address is not accessible
 Allows userspace to query the actual limit and set a new limit for
 the maximum guest memory size. The limit will be rounded up to
 2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
-the number of page table levels.
+the number of page table levels. In the case that there is no limit we will set
+the limit to KVM_S390_NO_MEM_LIMIT (U64_MAX).
 
 2. GROUP: KVM_S390_VM_CPU_MODEL
 Architectures: s390
......
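
For reference, a minimal userspace sketch of the attribute documented above, using the generic KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR ioctls on the VM file descriptor; vm_fd is an assumed, already-created VM fd and error handling is reduced to pass/fail:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t query_mem_limit(int vm_fd)
{
	uint64_t limit = 0;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr  = (uint64_t)&limit,
	};

	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
		return 0;
	return limit;	/* KVM_S390_NO_MEM_LIMIT (U64_MAX) means "no limit" */
}

static int set_mem_limit(int vm_fd, uint64_t new_limit)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr  = (uint64_t)&new_limit,
	};

	/* must run before the first vCPU is created, else -EBUSY */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
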
@@ -627,6 +627,7 @@ struct kvm_arch{
 	struct kvm_s390_float_interrupt float_int;
 	struct kvm_device *flic;
 	struct gmap *gmap;
+	unsigned long mem_limit;
 	int css_support;
 	int use_irqchip;
 	int use_cmma;
......
@@ -66,6 +66,8 @@ struct kvm_s390_io_adapter_req {
 #define KVM_S390_VM_MEM_CLR_CMMA	1
 #define KVM_S390_VM_MEM_LIMIT_SIZE	2
 
+#define KVM_S390_NO_MEM_LIMIT		U64_MAX
+
 /* kvm attributes for KVM_S390_VM_TOD */
 #define KVM_S390_VM_TOD_LOW		0
 #define KVM_S390_VM_TOD_HIGH		1
......
@@ -378,8 +378,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_MEM_LIMIT_SIZE:
 		ret = 0;
 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
-			 kvm->arch.gmap->asce_end);
-		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+			 kvm->arch.mem_limit);
+		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
 			ret = -EFAULT;
 		break;
 	default:
@@ -431,9 +431,17 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 		if (get_user(new_limit, (u64 __user *)attr->addr))
 			return -EFAULT;
 
-		if (new_limit > kvm->arch.gmap->asce_end)
+		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
+		    new_limit > kvm->arch.mem_limit)
 			return -E2BIG;
 
+		if (!new_limit)
+			return -EINVAL;
+
+		/* gmap_alloc takes last usable address */
+		if (new_limit != KVM_S390_NO_MEM_LIMIT)
+			new_limit -= 1;
+
 		ret = -EBUSY;
 		mutex_lock(&kvm->lock);
 		if (atomic_read(&kvm->online_vcpus) == 0) {
@@ -450,7 +458,9 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 			}
 		}
 		mutex_unlock(&kvm->lock);
-		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
+		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
+		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
+			 (void *) kvm->arch.gmap->asce);
 		break;
 	}
 	default:
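
A worked example of the "last usable address" adjustment in the hunk above, as a self-contained sketch with an assumed 2 GB request from userspace:

#include <stdint.h>

#define KVM_S390_NO_MEM_LIMIT	UINT64_MAX	/* mirrors the new uapi define */

int main(void)
{
	uint64_t new_limit = 1ULL << 31;	/* userspace asks for a 2 GB limit */

	if (new_limit != KVM_S390_NO_MEM_LIMIT)
		new_limit -= 1;			/* 0x7fffffff, the last usable address */
	/* gmap_alloc() now gets 0x7fffffff and stays on 2 page-table levels;
	 * passing 0x80000000 would have forced 3 levels (the old off-by-one).
	 */
	return new_limit == 0x7fffffff ? 0 : 1;
}
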
@@ -1172,8 +1182,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (type & KVM_VM_S390_UCONTROL) {
 		kvm->arch.gmap = NULL;
+		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
 	} else {
-		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
+		if (sclp.hamax == U64_MAX)
+			kvm->arch.mem_limit = TASK_MAX_SIZE;
+		else
+			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
+						    sclp.hamax + 1);
+
+		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
 		if (!kvm->arch.gmap)
 			goto out_err;
 		kvm->arch.gmap->private = kvm;
@@ -1185,7 +1201,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.epoch = 0;
 	spin_lock_init(&kvm->arch.start_stop_lock);
-	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
+	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
 
 	return 0;
 out_err:
@@ -1245,7 +1261,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	gmap_free(kvm->arch.gmap);
 	kvm_s390_destroy_adapters(kvm);
 	kvm_s390_clear_float_irqs(kvm);
-	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
+	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
 }
 
 /* Section: vcpu related */
@@ -1349,7 +1365,8 @@ static int sca_switch_to_extended(struct kvm *kvm)
 	free_page((unsigned long)old_sca);
 
-	VM_EVENT(kvm, 2, "Switched to ESCA (%p -> %p)", old_sca, kvm->arch.sca);
+	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
+		 old_sca, kvm->arch.sca);
 	return 0;
 }
@@ -1624,7 +1641,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
 		goto out_free_sie_block;
-	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
 		 vcpu->arch.sie_block);
 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
@@ -2120,7 +2137,8 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 	 */
 	kvm_check_async_pf_completion(vcpu);
 
-	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
+	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
+	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
 
 	if (need_resched())
 		schedule();
@@ -2185,7 +2203,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 	if (guestdbg_enabled(vcpu))
 		kvm_s390_restore_guest_per_regs(vcpu);
 
-	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
+	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
 
 	if (vcpu->arch.sie_block->icptcode > 0) {
 		int rc = kvm_handle_sie_intercept(vcpu);
@@ -2826,6 +2845,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (mem->memory_size & 0xffffful)
 		return -EINVAL;
 
+	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
+		return -EINVAL;
+
 	return 0;
 }
......
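
With the new check in kvm_arch_prepare_memory_region(), a memslot that ends beyond the configured limit is now rejected at registration time. A hedged userspace sketch, assuming the VM's limit was set to 2 GB earlier; vm_fd and backing (a host mapping) are assumed to exist:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int add_slot_past_limit(int vm_fd, void *backing)
{
	struct kvm_userspace_memory_region region = {
		.slot            = 0,
		.guest_phys_addr = 1ULL << 31,	/* starts right at the 2 GB limit */
		.memory_size     = 1ULL << 20,	/* 1 MB, properly aligned */
		.userspace_addr  = (uint64_t)backing,
	};

	/* guest_phys_addr + memory_size > mem_limit, so KVM returns -EINVAL */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
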
@@ -55,8 +55,8 @@ TRACE_EVENT(kvm_s390_create_vcpu,
 		__entry->sie_block = sie_block;
 		),
 
-	TP_printk("create cpu %d at %p, sie block at %p", __entry->id,
-		  __entry->vcpu, __entry->sie_block)
+	TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
+		  __entry->id, __entry->vcpu, __entry->sie_block)
 	);
 
 TRACE_EVENT(kvm_s390_destroy_vcpu,
@@ -254,7 +254,7 @@ TRACE_EVENT(kvm_s390_enable_css,
 		__entry->kvm = kvm;
 		),
 
-	TP_printk("enabling channel I/O support (kvm @ %p)\n",
+	TP_printk("enabling channel I/O support (kvm @ %pK)\n",
 		  __entry->kvm)
 	);
......
@@ -133,7 +133,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
  */
@@ -402,7 +402,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 	if ((from | to | len) & (PMD_SIZE - 1))
 		return -EINVAL;
 	if (len == 0 || from + len < from || to + len < to ||
-	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
 		return -EINVAL;
 
 	flush = 0;
......
@@ -40,7 +40,8 @@ struct read_info_sccb {
 	u8	fac85;			/* 85 */
 	u8	_pad_86[91 - 86];	/* 86-90 */
 	u8	flags;			/* 91 */
-	u8	_pad_92[100 - 92];	/* 92-99 */
+	u8	_pad_92[99 - 92];	/* 92-98 */
+	u8	hamaxpow;		/* 99 */
 	u32	rnsize2;		/* 100-103 */
 	u64	rnmax2;			/* 104-111 */
 	u8	_pad_112[116 - 112];	/* 112-115 */
@@ -120,6 +121,11 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
 	sclp.rzm <<= 20;
 	sclp.ibc = sccb->ibc;
 
+	if (sccb->hamaxpow && sccb->hamaxpow < 64)
+		sclp.hamax = (1UL << sccb->hamaxpow) - 1;
+	else
+		sclp.hamax = U64_MAX;
+
 	if (!sccb->hcpua) {
 		if (MACHINE_IS_VM)
 			sclp.max_cores = 64;
......
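
Putting the SCLP side together: hamaxpow (byte 99 of the read-info SCCB) reports the maximum host address as a power of two, and kvm_arch_init_vm() above turns that into the per-VM limit. A self-contained worked example with an assumed hamaxpow of 40; U64_MAX and TASK_MAX_SIZE are local stand-ins for the kernel definitions:

#include <stdint.h>

#define U64_MAX		(~(uint64_t)0)
#define TASK_MAX_SIZE	(1UL << 53)	/* s390 value at the time of this series */

int main(void)
{
	uint8_t hamaxpow = 40;		/* assumed example: 2^40 bytes = 1 TB */
	uint64_t hamax, mem_limit;

	/* as in sclp_facilities_detect() above */
	hamax = (hamaxpow && hamaxpow < 64) ? ((uint64_t)1 << hamaxpow) - 1
					    : U64_MAX;
	/* as in kvm_arch_init_vm() above */
	mem_limit = (hamax == U64_MAX) ? TASK_MAX_SIZE
		  : (TASK_MAX_SIZE < hamax + 1 ? TASK_MAX_SIZE : hamax + 1);

	return mem_limit == (1UL << 40) ? 0 : 1;	/* min(2^53, 2^40) = 1 TB */
}

A later KVM_S390_VM_MEM_LIMIT_SIZE request above this mem_limit is refused with -E2BIG, per the set_mem_control hunk earlier.
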