Commit 2c69c1a3 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-20140910' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

KVM: s390: Fixes and features for next (3.18)

1. Crypto/CPACF support: To enable the MSA4 instructions we have to
   provide a common control structure for each SIE control block
2. Two cleanups found by a static code checker: one redundant assignment
   and one useless if
3. Fix the page handling of the diag10 ballooning interface. If the
   guest freed the pages at absolute 0, some checks and frees were
   incorrect (a standalone sketch of the new range split follows the
   diag10 hunk below)
4. Limit guests to 16TB
5. Add __must_check to interrupt injection code
parents 209cf19f bfac1f59
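For orientation before the diff: items 1 and 4 above come down to two small pieces of arithmetic, shown in this minimal standalone sketch (illustration only, not kernel code; the crycb address is hypothetical). The CRYCB descriptor packs the crypto control block origin with a format bit, and the new gmap limit of (1UL << 44) - 1 corresponds to 16 TB.

/* Illustration only -- mirrors the arithmetic in the hunks below. */
#include <stdio.h>

#define CRYCB_FORMAT1 0x00000001

int main(void)
{
	/* crycbd: crypto control block origin ORed with the format bit */
	unsigned long crycb = 0x12345000UL;	/* hypothetical GFP_DMA address */
	unsigned int crycbd = (unsigned int)crycb | CRYCB_FORMAT1;
	printf("crycbd = 0x%08x\n", crycbd);

	/* guest address space limit handed to gmap_alloc() */
	unsigned long limit = (1UL << 44) - 1;
	printf("guest limit = %lu TB\n", (limit + 1) >> 40);	/* prints 16 */
	return 0;
}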
@@ -157,7 +157,9 @@ struct kvm_s390_sie_block {
__u8 armid; /* 0x00e3 */
__u8 reservede4[4]; /* 0x00e4 */
__u64 tecmc; /* 0x00e8 */
-__u8 reservedf0[16]; /* 0x00f0 */
+__u8 reservedf0[12]; /* 0x00f0 */
+#define CRYCB_FORMAT1 0x00000001
+__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
__u8 reserved188[24]; /* 0x0188 */
@@ -410,6 +412,15 @@ struct s390_io_adapter {
#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256
+struct kvm_s390_crypto {
+struct kvm_s390_crypto_cb *crycb;
+__u32 crycbd;
+};
+struct kvm_s390_crypto_cb {
+__u8 reserved00[128]; /* 0x0000 */
+};
struct kvm_arch{
struct sca_block *sca;
debug_info_t *dbf;
@@ -423,6 +434,7 @@ struct kvm_arch{
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
spinlock_t start_stop_lock;
+struct kvm_s390_crypto crypto;
};
#define KVM_HVA_ERR_BAD (-1UL)
......
@@ -28,22 +28,32 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
-if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end
+if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
|| start < 2 * PAGE_SIZE)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
vcpu->stat.diagnose_10++;
-/* we checked for start > end above */
-if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
+/*
+ * We checked for start >= end above, so lets check for the
+ * fast path (no prefix swap page involved)
+ */
+if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
gmap_discard(vcpu->arch.gmap, start, end);
} else {
-if (start < prefix)
-gmap_discard(vcpu->arch.gmap, start, prefix);
-if (end >= prefix)
-gmap_discard(vcpu->arch.gmap,
-prefix + 2 * PAGE_SIZE, end);
+/*
+ * This is slow path. gmap_discard will check for start
+ * so lets split this into before prefix, prefix, after
+ * prefix and let gmap_discard make some of these calls
+ * NOPs.
+ */
+gmap_discard(vcpu->arch.gmap, start, prefix);
+if (start <= prefix)
+gmap_discard(vcpu->arch.gmap, 0, 4096);
+if (end > prefix + 4096)
+gmap_discard(vcpu->arch.gmap, 4096, 8192);
+gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
}
return 0;
}
......
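As a reading aid for the diag10 change above, a standalone sketch (stubbed gmap_discard, hypothetical guest addresses, not kernel code) of how the new slow path splits a release request that overlaps the 8K prefix swap area:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stub: the real gmap_discard() unmaps a guest range and is effectively
 * a no-op for an empty or inverted range, which makes the split safe. */
static void gmap_discard(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printf("discard [0x%lx, 0x%lx)\n", start, end);
}

int main(void)
{
	/* hypothetical request overlapping the prefix swap pages */
	unsigned long prefix = 0x10000, start = 0xc000, end = 0x20000;

	/* same split as the hunk above: the two prefix pages are backed by
	 * absolute pages 0 and 4096, so those are discarded separately */
	gmap_discard(start, prefix);			/* below the prefix area */
	if (start <= prefix)
		gmap_discard(0, PAGE_SIZE);		/* first prefix page */
	if (end > prefix + PAGE_SIZE)
		gmap_discard(PAGE_SIZE, 2 * PAGE_SIZE);	/* second prefix page */
	gmap_discard(prefix + 2 * PAGE_SIZE, end);	/* above the prefix area */
	return 0;
}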
@@ -254,8 +254,7 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
new = old = ACCESS_ONCE(*ic);
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
-if (!ipte_lock_count)
-wake_up(&vcpu->kvm->arch.ipte_wq);
+wake_up(&vcpu->kvm->arch.ipte_wq);
out:
mutex_unlock(&ipte_mutex);
}
......
@@ -28,7 +28,7 @@
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
-static int deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
+static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
static int is_ioint(u64 type)
{
@@ -77,7 +77,7 @@ static u64 int_word_to_isc_bits(u32 int_word)
return (0x80 >> isc) << 24;
}
-static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti)
{
switch (inti->type) {
@@ -86,6 +86,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
return 0;
if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
return 1;
+return 0;
case KVM_S390_INT_EMERGENCY:
if (psw_extint_disabled(vcpu))
return 0;
@@ -225,7 +226,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
}
}
-static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
+static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
struct kvm_s390_pgm_info *pgm_info)
{
int rc = 0;
@@ -307,7 +308,7 @@ static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
return rc;
}
-static int __do_deliver_interrupt(struct kvm_vcpu *vcpu,
+static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti)
{
const unsigned short table[] = { 2, 4, 4, 6 };
@@ -508,7 +509,7 @@ static int __do_deliver_interrupt(struct kvm_vcpu *vcpu,
return rc;
}
-static int deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
+static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
int rc;
@@ -657,7 +658,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
&vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}
-int kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
+int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
@@ -1351,7 +1352,6 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
r = enqueue_floating_irq(dev, attr);
break;
case KVM_DEV_FLIC_CLEAR_IRQS:
-r = 0;
kvm_s390_clear_float_irqs(dev->kvm);
break;
case KVM_DEV_FLIC_APF_ENABLE:
......
@@ -392,6 +392,22 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r;
}
+static int kvm_s390_crypto_init(struct kvm *kvm)
+{
+if (!test_vfacility(76))
+return 0;
+kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
+GFP_KERNEL | GFP_DMA);
+if (!kvm->arch.crypto.crycb)
+return -ENOMEM;
+kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
+CRYCB_FORMAT1;
+return 0;
+}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int rc;
@@ -429,6 +445,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.dbf)
goto out_nodbf;
+if (kvm_s390_crypto_init(kvm) < 0)
+goto out_crypto;
spin_lock_init(&kvm->arch.float_int.lock);
INIT_LIST_HEAD(&kvm->arch.float_int.list);
init_waitqueue_head(&kvm->arch.ipte_wq);
@@ -439,7 +458,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type & KVM_VM_S390_UCONTROL) {
kvm->arch.gmap = NULL;
} else {
-kvm->arch.gmap = gmap_alloc(current->mm, -1UL);
+kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
if (!kvm->arch.gmap)
goto out_nogmap;
kvm->arch.gmap->private = kvm;
@@ -453,6 +472,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return 0;
out_nogmap:
+kfree(kvm->arch.crypto.crycb);
+out_crypto:
debug_unregister(kvm->arch.dbf);
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
@@ -507,6 +528,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_free_vcpus(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
+kfree(kvm->arch.crypto.crycb);
if (!kvm_is_ucontrol(kvm))
gmap_free(kvm->arch.gmap);
kvm_s390_destroy_adapters(kvm);
@@ -588,6 +610,14 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
return 0;
}
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
+{
+if (!test_vfacility(76))
+return;
+vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
+}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
free_page(vcpu->arch.sie_block->cbrlo);
@@ -634,6 +664,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
get_cpu_id(&vcpu->arch.cpu_id);
vcpu->arch.cpu_id.version = 0xff;
+kvm_s390_vcpu_crypto_setup(vcpu);
return rc;
}
......
@@ -138,7 +138,7 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
-int kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
......
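On the __must_check annotations (item 5): in the kernel this macro expands to the compiler's warn_unused_result attribute, so callers that drop the return value of the delivery/injection helpers now get a warning. A minimal userspace sketch (hypothetical function, illustration only) of the effect:

#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

static int __must_check inject_irq(int vcpu_id)
{
	return vcpu_id < 0 ? -1 : 0;	/* hypothetical failure path */
}

int main(void)
{
	inject_irq(3);			/* compiler warns: result ignored */
	if (inject_irq(3) < 0)		/* checked: no warning */
		fprintf(stderr, "injection failed\n");
	return 0;
}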