Commit 981467c9 authored by Michael Mueller, committed by Christian Borntraeger

KVM: s390: include guest facilities in kvm facility test

Most facility related decisions in KVM have to take into account:

- the facilities offered by the underlying run container (LPAR/VM)
- the facilities supported by the KVM code itself
- the facilities requested by a guest VM

This patch adds the KVM driver requested facilities to the test routine.

It additionally renames struct s390_model_fac to kvm_s390_fac and its field
names to be more meaningful.

The semantics of the facilities stored in the KVM architecture structure
is changed. The address arch.model.fac->list now points to the guest
facility list and arch.model.fac->mask points to the KVM facility mask.

This patch fixes the behaviour of KVM for some facilities for guests
that ignore the guest visible facility bits, e.g. guests could use
transactional memory instructions on hosts supporting them even if the
chosen cpu model would not offer them.

The userspace interface is not affected by this change.
Signed-off-by: Michael Mueller <mimu@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 94422ee8
...@@ -515,15 +515,15 @@ struct s390_io_adapter { ...@@ -515,15 +515,15 @@ struct s390_io_adapter {
#define S390_ARCH_FAC_MASK_SIZE_U64 \ #define S390_ARCH_FAC_MASK_SIZE_U64 \
(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
struct s390_model_fac { struct kvm_s390_fac {
/* facilities used in SIE context */ /* facility list requested by guest */
__u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
/* subset enabled by kvm */ /* facility mask supported by kvm & hosting machine */
__u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
}; };
struct kvm_s390_cpu_model { struct kvm_s390_cpu_model {
struct s390_model_fac *fac; struct kvm_s390_fac *fac;
struct cpuid cpu_id; struct cpuid cpu_id;
unsigned short ibc; unsigned short ibc;
}; };
......
...@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) ...@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
sizeof(struct cpuid)); sizeof(struct cpuid));
kvm->arch.model.ibc = proc->ibc; kvm->arch.model.ibc = proc->ibc;
memcpy(kvm->arch.model.fac->kvm, proc->fac_list, memcpy(kvm->arch.model.fac->list, proc->fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE); S390_ARCH_FAC_LIST_SIZE_BYTE);
} else } else
ret = -EFAULT; ret = -EFAULT;
...@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) ...@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
} }
memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
proc->ibc = kvm->arch.model.ibc; proc->ibc = kvm->arch.model.ibc;
memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
ret = -EFAULT; ret = -EFAULT;
kfree(proc); kfree(proc);
...@@ -576,8 +576,8 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) ...@@ -576,8 +576,8 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
} }
get_cpu_id((struct cpuid *) &mach->cpuid); get_cpu_id((struct cpuid *) &mach->cpuid);
mach->ibc = sclp_get_ibc(); mach->ibc = sclp_get_ibc();
memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
kvm_s390_fac_list_mask_size() * sizeof(u64)); S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE); S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
...@@ -893,16 +893,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ...@@ -893,16 +893,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* /*
* The architectural maximum amount of facilities is 16 kbit. To store * The architectural maximum amount of facilities is 16 kbit. To store
* this amount, 2 kbyte of memory is required. Thus we need a full * this amount, 2 kbyte of memory is required. Thus we need a full
* page to hold the active copy (arch.model.fac->sie) and the current * page to hold the guest facility list (arch.model.fac->list) and the
* facilities set (arch.model.fac->kvm). Its address size has to be * facility mask (arch.model.fac->mask). Its address size has to be
* 31 bits and word aligned. * 31 bits and word aligned.
*/ */
kvm->arch.model.fac = kvm->arch.model.fac =
(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac) if (!kvm->arch.model.fac)
goto out_nofac; goto out_nofac;
memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE); S390_ARCH_FAC_LIST_SIZE_BYTE);
/* /*
...@@ -914,7 +914,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ...@@ -914,7 +914,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
*/ */
if (!MACHINE_IS_LPAR) if (!MACHINE_IS_LPAR)
for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->mask[i];
/* /*
* Apply the kvm facility mask to limit the kvm supported/tolerated * Apply the kvm facility mask to limit the kvm supported/tolerated
...@@ -922,11 +922,15 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ...@@ -922,11 +922,15 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
*/ */
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size()) if (i < kvm_s390_fac_list_mask_size())
kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
else else
kvm->arch.model.fac->kvm[i] = 0UL; kvm->arch.model.fac->mask[i] = 0UL;
} }
/* Populate the facility list initially. */
memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
...@@ -1172,8 +1176,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -1172,8 +1176,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
mutex_lock(&vcpu->kvm->lock); mutex_lock(&vcpu->kvm->lock);
vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
S390_ARCH_FAC_LIST_SIZE_BYTE);
vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
mutex_unlock(&vcpu->kvm->lock); mutex_unlock(&vcpu->kvm->lock);
...@@ -1219,7 +1221,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, ...@@ -1219,7 +1221,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
} }
vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
spin_lock_init(&vcpu->arch.local_int.lock); spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int; vcpu->arch.local_int.float_int = &kvm->arch.float_int;
......
...@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) ...@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
/* test availability of facility in a kvm instance */ /* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{ {
return __test_facility(nr, kvm->arch.model.fac->kvm); return __test_facility(nr, kvm->arch.model.fac->mask) &&
__test_facility(nr, kvm->arch.model.fac->list);
} }
/* are cpu states controlled by user space */ /* are cpu states controlled by user space */
......
...@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) ...@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
* We need to shift the lower 32 facility bits (bit 0-31) from a u64 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
* into a u32 memory representation. They will remain bits 0-31. * into a u32 memory representation. They will remain bits 0-31.
*/ */
fac = *vcpu->kvm->arch.model.fac->sie >> 32; fac = *vcpu->kvm->arch.model.fac->list >> 32;
rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&fac, sizeof(fac)); &fac, sizeof(fac));
if (rc) if (rc)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment