Commit a2fa301f authored by Paolo Bonzini

Merge tag 'kvm-s390-20140304' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
parents 332967a3 96b14536

--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -44,11 +44,21 @@ struct airq_iv {
 
 struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
 void airq_iv_release(struct airq_iv *iv);
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
 unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
                            unsigned long end);
 
+static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+{
+        return airq_iv_alloc(iv, 1);
+}
+
+static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+{
+        airq_iv_free(iv, bit, 1);
+}
+
 static inline unsigned long airq_iv_end(struct airq_iv *iv)
 {
         return iv->end;
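
Note: the allocation API now takes a block size, and the old single-bit entry points survive as the inline wrappers above, so existing callers need no changes. A minimal usage sketch, assuming a vector created with the AIRQ_IV_ALLOC flag (the sizes and error handling here are illustrative only, not taken from the patch):

        /* Sketch: grab and release a block of 8 consecutive irq bits. */
        struct airq_iv *iv = airq_iv_create(256, AIRQ_IV_ALLOC);
        unsigned long start;

        if (!iv)
                return -ENOMEM;
        start = airq_iv_alloc(iv, 8);   /* first bit of the block, or -1UL */
        if (start == -1UL) {
                airq_iv_release(iv);
                return -ENOSPC;
        }
        /* ... hand bits start..start+7 to the device ... */
        airq_iv_free(iv, start, 8);     /* free the whole block again */
        airq_iv_release(iv);
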
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -53,6 +53,7 @@ enum interruption_class {
         IRQIO_PCI,
         IRQIO_MSI,
         IRQIO_VIR,
+        IRQIO_VAI,
         NMI_NMI,
         CPU_RST,
         NR_ARCH_IRQS
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -107,7 +107,9 @@ struct kvm_s390_sie_block {
         __u64   gbea;                   /* 0x0180 */
         __u8    reserved188[24];        /* 0x0188 */
         __u32   fac;                    /* 0x01a0 */
-        __u8    reserved1a4[68];        /* 0x01a4 */
+        __u8    reserved1a4[58];        /* 0x01a4 */
+        __u64   pp;                     /* 0x01de */
+        __u8    reserved1e6[2];         /* 0x01e6 */
         __u64   itdba;                  /* 0x01e8 */
         __u8    reserved1f0[16];        /* 0x01f0 */
 } __attribute__((packed));
@@ -213,7 +215,6 @@ struct kvm_s390_float_interrupt {
 
         int next_rr_cpu;
         unsigned long idle_mask[(KVM_MAX_VCPUS + sizeof(long) - 1)
                                 / sizeof(long)];
-        struct kvm_s390_local_interrupt *local_int[KVM_MAX_VCPUS];
         unsigned int irq_count;
 };
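
Note: splitting reserved1a4 must keep every neighbouring field of the packed SIE block at its architected offset; the comments give the intended layout (pp at 0x01de, itdba unchanged at 0x01e8). A compile-time check along these lines, not part of the patch, is a cheap way to verify such edits (the kernel itself would use BUILD_BUG_ON; this sketch assumes the struct definition is in scope):

        #include <stddef.h>

        /* Sketch: assert the layout after splitting reserved1a4. */
        _Static_assert(offsetof(struct kvm_s390_sie_block, pp) == 0x1de,
                       "pp must sit at its architected offset");
        _Static_assert(offsetof(struct kvm_s390_sie_block, itdba) == 0x1e8,
                       "itdba must not move");
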
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -76,4 +76,6 @@ struct kvm_sync_regs {
 #define KVM_REG_S390_PFTOKEN    (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5)
 #define KVM_REG_S390_PFCOMPARE  (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6)
 #define KVM_REG_S390_PFSELECT   (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7)
+#define KVM_REG_S390_PP         (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8)
+#define KVM_REG_S390_GBEA       (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9)
 #endif
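
Note: these two IDs let user space save and restore the program-parameter and guest-breaking-event-address registers through the generic ONE_REG interface, e.g. during migration. A userspace sketch of reading one of them (vcpu_fd is assumed to be a file descriptor obtained via KVM_CREATE_VCPU):

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        /* Sketch: read the guest's program parameter via KVM_GET_ONE_REG;
         * the kernel's get_one_reg handler put_user()s into reg.addr. */
        static int get_guest_pp(int vcpu_fd, uint64_t *pp)
        {
                struct kvm_one_reg reg = {
                        .id   = KVM_REG_S390_PP,
                        .addr = (uint64_t)(uintptr_t)pp,
                };

                return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        }
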
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -84,6 +84,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
         [IRQIO_PCI]  = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
         [IRQIO_MSI]  = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
         [IRQIO_VIR]  = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
+        [IRQIO_VAI]  = {.name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
         [NMI_NMI]    = {.name = "NMI", .desc = "[NMI] Machine Check"},
         [CPU_RST]    = {.name = "RST", .desc = "[CPU] CPU Restart"},
 };
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -692,6 +692,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
         struct kvm_s390_local_interrupt *li;
         struct kvm_s390_float_interrupt *fi;
         struct kvm_s390_interrupt_info *iter;
+        struct kvm_vcpu *dst_vcpu = NULL;
         int sigcpu;
         int rc = 0;
 
@@ -726,9 +727,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
                         sigcpu = fi->next_rr_cpu++;
                         if (sigcpu == KVM_MAX_VCPUS)
                                 sigcpu = fi->next_rr_cpu = 0;
-                } while (fi->local_int[sigcpu] == NULL);
+                } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
         }
-        li = fi->local_int[sigcpu];
+        dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+        li = &dst_vcpu->arch.local_int;
         spin_lock_bh(&li->lock);
         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
         if (waitqueue_active(li->wq))
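
Note: with the local_int[] array gone from kvm_s390_float_interrupt, the destination CPU is resolved through kvm_get_vcpu() and the per-vcpu interrupt state is reached from the vcpu structure itself, so the floating-interrupt lock no longer guards the lookup. Condensed, the pattern that recurs throughout this merge looks like the following (this standalone fragment is not itself in the patch; the names mirror the code above):

        /* Condensed lookup: resolve the vcpu first, then take its
         * embedded local-interrupt state; no fi->lock is needed. */
        struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
        struct kvm_s390_local_interrupt *li;

        if (!dst_vcpu)
                return -ENODEV; /* callers map this to their own rc */
        li = &dst_vcpu->arch.local_int;
        /* take li->lock and queue the interrupt as before */
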
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -386,6 +386,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
         vcpu->arch.guest_fpregs.fpc = 0;
         asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
         vcpu->arch.sie_block->gbea = 1;
+        vcpu->arch.sie_block->pp = 0;
         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
         kvm_clear_async_pf_completion_queue(vcpu);
         atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
@@ -459,11 +460,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
         spin_lock_init(&vcpu->arch.local_int.lock);
         INIT_LIST_HEAD(&vcpu->arch.local_int.list);
         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-        spin_lock(&kvm->arch.float_int.lock);
-        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
         vcpu->arch.local_int.wq = &vcpu->wq;
         vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-        spin_unlock(&kvm->arch.float_int.lock);
 
         rc = kvm_vcpu_init(vcpu, kvm, id);
         if (rc)
@@ -571,6 +569,14 @@ static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                 r = put_user(vcpu->arch.pfault_select,
                              (u64 __user *)reg->addr);
                 break;
+        case KVM_REG_S390_PP:
+                r = put_user(vcpu->arch.sie_block->pp,
+                             (u64 __user *)reg->addr);
+                break;
+        case KVM_REG_S390_GBEA:
+                r = put_user(vcpu->arch.sie_block->gbea,
+                             (u64 __user *)reg->addr);
+                break;
         default:
                 break;
         }
@@ -612,6 +618,14 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                 r = get_user(vcpu->arch.pfault_select,
                              (u64 __user *)reg->addr);
                 break;
+        case KVM_REG_S390_PP:
+                r = get_user(vcpu->arch.sie_block->pp,
+                             (u64 __user *)reg->addr);
+                break;
+        case KVM_REG_S390_GBEA:
+                r = get_user(vcpu->arch.sie_block->gbea,
+                             (u64 __user *)reg->addr);
+                break;
         default:
                 break;
         }
@@ -935,7 +949,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
-        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
+        BUG_ON(kvm_get_vcpu(vcpu->kvm, vcpu->vcpu_id) == NULL);
 
         switch (kvm_run->exit_reason) {
         case KVM_EXIT_S390_SIEIC:
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -396,15 +396,10 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 
 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         int cpus = 0;
         int n;
 
-        spin_lock(&fi->lock);
-        for (n = 0; n < KVM_MAX_VCPUS; n++)
-                if (fi->local_int[n])
-                        cpus++;
-        spin_unlock(&fi->lock);
+        cpus = atomic_read(&vcpu->kvm->online_vcpus);
 
         /* deal with other level 3 hypervisors */
         if (stsi(mem, 3, 2, 2))
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -23,29 +23,30 @@
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                         u64 *reg)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+        struct kvm_s390_local_interrupt *li;
+        struct kvm_vcpu *dst_vcpu = NULL;
+        int cpuflags;
         int rc;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
                 return SIGP_CC_NOT_OPERATIONAL;
 
-        spin_lock(&fi->lock);
-        if (fi->local_int[cpu_addr] == NULL)
-                rc = SIGP_CC_NOT_OPERATIONAL;
-        else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
-                   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+        if (!dst_vcpu)
+                return SIGP_CC_NOT_OPERATIONAL;
+        li = &dst_vcpu->arch.local_int;
+
+        cpuflags = atomic_read(li->cpuflags);
+        if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
                 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
         else {
                 *reg &= 0xffffffff00000000UL;
-                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-                    & CPUSTAT_ECALL_PEND)
+                if (cpuflags & CPUSTAT_ECALL_PEND)
                         *reg |= SIGP_STATUS_EXT_CALL_PENDING;
-                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-                    & CPUSTAT_STOPPED)
+                if (cpuflags & CPUSTAT_STOPPED)
                         *reg |= SIGP_STATUS_STOPPED;
                 rc = SIGP_CC_STATUS_STORED;
         }
-        spin_unlock(&fi->lock);
 
         VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
         return rc;
@@ -53,10 +54,9 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
         struct kvm_s390_interrupt_info *inti;
-        int rc;
+        struct kvm_vcpu *dst_vcpu = NULL;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
                 return SIGP_CC_NOT_OPERATIONAL;
@@ -68,13 +68,10 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
         inti->type = KVM_S390_INT_EMERGENCY;
         inti->emerg.code = vcpu->vcpu_id;
 
-        spin_lock(&fi->lock);
-        li = fi->local_int[cpu_addr];
-        if (li == NULL) {
-                rc = SIGP_CC_NOT_OPERATIONAL;
-                kfree(inti);
-                goto unlock;
-        }
+        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+        if (!dst_vcpu)
+                return SIGP_CC_NOT_OPERATIONAL;
+        li = &dst_vcpu->arch.local_int;
         spin_lock_bh(&li->lock);
         list_add_tail(&inti->list, &li->list);
         atomic_set(&li->active, 1);
@@ -82,11 +79,9 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
         if (waitqueue_active(li->wq))
                 wake_up_interruptible(li->wq);
         spin_unlock_bh(&li->lock);
-        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+
         VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
-unlock:
-        spin_unlock(&fi->lock);
-        return rc;
+
+        return SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -122,10 +117,9 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
         struct kvm_s390_interrupt_info *inti;
-        int rc;
+        struct kvm_vcpu *dst_vcpu = NULL;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
                 return SIGP_CC_NOT_OPERATIONAL;
@@ -137,13 +131,10 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
         inti->type = KVM_S390_INT_EXTERNAL_CALL;
         inti->extcall.code = vcpu->vcpu_id;
 
-        spin_lock(&fi->lock);
-        li = fi->local_int[cpu_addr];
-        if (li == NULL) {
-                rc = SIGP_CC_NOT_OPERATIONAL;
-                kfree(inti);
-                goto unlock;
-        }
+        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+        if (!dst_vcpu)
+                return SIGP_CC_NOT_OPERATIONAL;
+        li = &dst_vcpu->arch.local_int;
         spin_lock_bh(&li->lock);
         list_add_tail(&inti->list, &li->list);
         atomic_set(&li->active, 1);
@@ -151,11 +142,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
         if (waitqueue_active(li->wq))
                 wake_up_interruptible(li->wq);
         spin_unlock_bh(&li->lock);
-        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+
         VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
-unlock:
-        spin_unlock(&fi->lock);
-        return rc;
+
+        return SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
@@ -189,31 +178,26 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
+        struct kvm_vcpu *dst_vcpu = NULL;
         int rc;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
                 return SIGP_CC_NOT_OPERATIONAL;
 
-        spin_lock(&fi->lock);
-        li = fi->local_int[cpu_addr];
-        if (li == NULL) {
-                rc = SIGP_CC_NOT_OPERATIONAL;
-                goto unlock;
-        }
+        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+        if (!dst_vcpu)
+                return SIGP_CC_NOT_OPERATIONAL;
+        li = &dst_vcpu->arch.local_int;
 
         rc = __inject_sigp_stop(li, action);
 
-unlock:
-        spin_unlock(&fi->lock);
         VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
         if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
                 /* If the CPU has already been stopped, we still have
                  * to save the status when doing stop-and-store. This
                  * has to be done after unlocking all spinlocks. */
-                struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
                 rc = kvm_s390_store_status_unloaded(dst_vcpu,
                                                 KVM_S390_STORE_STATUS_NOADDR);
         }
@@ -249,12 +233,18 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                              u64 *reg)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
-        struct kvm_s390_local_interrupt *li = NULL;
+        struct kvm_s390_local_interrupt *li;
+        struct kvm_vcpu *dst_vcpu = NULL;
         struct kvm_s390_interrupt_info *inti;
         int rc;
         u8 tmp;
 
+        if (cpu_addr < KVM_MAX_VCPUS)
+                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+        if (!dst_vcpu)
+                return SIGP_CC_NOT_OPERATIONAL;
+        li = &dst_vcpu->arch.local_int;
+
         /* make sure that the new value is valid memory */
         address = address & 0x7fffe000u;
         if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
@@ -268,18 +258,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
         if (!inti)
                 return SIGP_CC_BUSY;
 
-        spin_lock(&fi->lock);
-        if (cpu_addr < KVM_MAX_VCPUS)
-                li = fi->local_int[cpu_addr];
-
-        if (li == NULL) {
-                *reg &= 0xffffffff00000000UL;
-                *reg |= SIGP_STATUS_INCORRECT_STATE;
-                rc = SIGP_CC_STATUS_STORED;
-                kfree(inti);
-                goto out_fi;
-        }
-
         spin_lock_bh(&li->lock);
         /* cpu must be in stopped state */
         if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
@@ -302,8 +280,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
         VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
         spin_unlock_bh(&li->lock);
-out_fi:
-        spin_unlock(&fi->lock);
         return rc;
 }
 
@@ -341,28 +317,26 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
 static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                 u64 *reg)
 {
+        struct kvm_s390_local_interrupt *li;
+        struct kvm_vcpu *dst_vcpu = NULL;
         int rc;
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
                 return SIGP_CC_NOT_OPERATIONAL;
 
-        spin_lock(&fi->lock);
-        if (fi->local_int[cpu_addr] == NULL)
-                rc = SIGP_CC_NOT_OPERATIONAL;
-        else {
-                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-                    & CPUSTAT_RUNNING) {
-                        /* running */
-                        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-                } else {
-                        /* not running */
-                        *reg &= 0xffffffff00000000UL;
-                        *reg |= SIGP_STATUS_NOT_RUNNING;
-                        rc = SIGP_CC_STATUS_STORED;
-                }
+        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+        if (!dst_vcpu)
+                return SIGP_CC_NOT_OPERATIONAL;
+        li = &dst_vcpu->arch.local_int;
+        if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+                /* running */
+                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        } else {
+                /* not running */
+                *reg &= 0xffffffff00000000UL;
+                *reg |= SIGP_STATUS_NOT_RUNNING;
+                rc = SIGP_CC_STATUS_STORED;
         }
-        spin_unlock(&fi->lock);
 
         VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
                    rc);
@@ -373,26 +347,22 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 /* Test whether the destination CPU is available and not busy */
 static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
         int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        struct kvm_vcpu *dst_vcpu = NULL;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
                 return SIGP_CC_NOT_OPERATIONAL;
 
-        spin_lock(&fi->lock);
-        li = fi->local_int[cpu_addr];
-        if (li == NULL) {
-                rc = SIGP_CC_NOT_OPERATIONAL;
-                goto out;
-        }
+        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+        if (!dst_vcpu)
+                return SIGP_CC_NOT_OPERATIONAL;
+        li = &dst_vcpu->arch.local_int;
 
         spin_lock_bh(&li->lock);
         if (li->action_bits & ACTION_STOP_ON_STOP)
                 rc = SIGP_CC_BUSY;
         spin_unlock_bh(&li->lock);
-out:
-        spin_unlock(&fi->lock);
 
         return rc;
 }
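
Note: every SIGP handler above now opens with the same four steps: range-check the address, resolve the vcpu with kvm_get_vcpu(), bail out with SIGP_CC_NOT_OPERATIONAL when it does not exist, and take the vcpu's embedded local_int. A hypothetical helper, not part of this merge, makes the repeated shape explicit:

        /* Hypothetical consolidation of the repeated lookup (not in the
         * merge): returns NULL when the SIGP destination is not
         * operational, which callers map to SIGP_CC_NOT_OPERATIONAL. */
        static struct kvm_s390_local_interrupt *sigp_get_dst_li(struct kvm *kvm,
                                                                u16 cpu_addr)
        {
                struct kvm_vcpu *dst_vcpu;

                if (cpu_addr >= KVM_MAX_VCPUS)
                        return NULL;
                dst_vcpu = kvm_get_vcpu(kvm, cpu_addr);
                return dst_vcpu ? &dst_vcpu->arch.local_int : NULL;
        }
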
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -186,55 +186,71 @@ void airq_iv_release(struct airq_iv *iv)
 EXPORT_SYMBOL(airq_iv_release);
 
 /**
- * airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
  * @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
  *
- * Returns the bit number of the allocated irq, or -1UL if no bit
- * is available or the AIRQ_IV_ALLOC flag has not been specified
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
  */
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
 {
-        unsigned long bit;
+        unsigned long bit, i;
 
-        if (!iv->avail)
+        if (!iv->avail || num == 0)
                 return -1UL;
         spin_lock(&iv->lock);
         bit = find_first_bit_inv(iv->avail, iv->bits);
-        if (bit < iv->bits) {
-                clear_bit_inv(bit, iv->avail);
-                if (bit >= iv->end)
-                        iv->end = bit + 1;
-        } else
+        while (bit + num <= iv->bits) {
+                for (i = 1; i < num; i++)
+                        if (!test_bit_inv(bit + i, iv->avail))
+                                break;
+                if (i >= num) {
+                        /* Found a suitable block of irqs */
+                        for (i = 0; i < num; i++)
+                                clear_bit_inv(bit + i, iv->avail);
+                        if (bit + num >= iv->end)
+                                iv->end = bit + num + 1;
+                        break;
+                }
+                bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+        }
+        if (bit + num > iv->bits)
                 bit = -1UL;
         spin_unlock(&iv->lock);
         return bit;
 }
-EXPORT_SYMBOL(airq_iv_alloc_bit);
+EXPORT_SYMBOL(airq_iv_alloc);
 
 /**
- * airq_iv_free_bit - free an irq bit of an interrupt vector
+ * airq_iv_free - free irq bits of an interrupt vector
  * @iv: pointer to interrupt vector structure
- * @bit: number of the irq bit to free
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
  */
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
 {
-        if (!iv->avail)
+        unsigned long i;
+
+        if (!iv->avail || num == 0)
                 return;
         spin_lock(&iv->lock);
-        /* Clear (possibly left over) interrupt bit */
-        clear_bit_inv(bit, iv->vector);
-        /* Make the bit position available again */
-        set_bit_inv(bit, iv->avail);
-        if (bit == iv->end - 1) {
+        for (i = 0; i < num; i++) {
+                /* Clear (possibly left over) interrupt bit */
+                clear_bit_inv(bit + i, iv->vector);
+                /* Make the bit positions available again */
+                set_bit_inv(bit + i, iv->avail);
+        }
+        if (bit + num >= iv->end) {
                 /* Find new end of bit-field */
-                while (--iv->end > 0)
-                        if (!test_bit_inv(iv->end - 1, iv->avail))
-                                break;
+                while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+                        iv->end--;
         }
         spin_unlock(&iv->lock);
 }
-EXPORT_SYMBOL(airq_iv_free_bit);
+EXPORT_SYMBOL(airq_iv_free);
 
 /**
  * airq_iv_scan - scan interrupt vector for non-zero bits
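
Note: airq_iv_alloc() is a first-fit scan: the inner loop tests whether num consecutive bits starting at bit are still available, and on a miss the search resumes just past the conflicting bit. A simplified, self-contained model of the same scan over an ordinary little-endian bitmap (the real code uses the s390 MSB-first *_inv bitops under iv->lock; this fragment is illustrative only):

        #include <stdio.h>

        /* Model of the first-fit block scan: bit i set in *avail means
         * "bit i is free".  Returns the first bit of the block, or -1. */
        static long alloc_block(unsigned long *avail, unsigned long bits,
                                unsigned long num)
        {
                unsigned long bit, i;

                for (bit = 0; bit + num <= bits; bit++) {
                        for (i = 0; i < num; i++)
                                if (!(*avail & (1UL << (bit + i))))
                                        break;
                        if (i >= num) {         /* found a free block */
                                for (i = 0; i < num; i++)
                                        *avail &= ~(1UL << (bit + i));
                                return (long) bit;
                        }
                        bit += i;       /* restart just past the used bit */
                }
                return -1;
        }

        int main(void)
        {
                unsigned long avail = 0x3b;     /* bits 0,1,3,4,5 free; 2 used */

                printf("%ld\n", alloc_block(&avail, 6, 3));     /* prints 3 */
                return 0;
        }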