Commit 934d534f authored by Jes Sorensen's avatar Jes Sorensen Committed by Avi Kivity

KVM: ia64: dynamic nr online cpus

Account for the number of online cpus and use that in loops iterating over
the list of vcpus, instead of scanning the full array unconditionally.
This patch is a building block to facilitate allowing to bump up
the size of MAX_VCPUS significantly.
Signed-off-by: Jes Sorensen <jes@sgi.com>
Acked-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent b7e6e4d3
...@@ -157,10 +157,10 @@ struct kvm_vm_data { ...@@ -157,10 +157,10 @@ struct kvm_vm_data {
struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS]; struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
}; };
#define VCPU_BASE(n) KVM_VM_DATA_BASE + \ #define VCPU_BASE(n) (KVM_VM_DATA_BASE + \
offsetof(struct kvm_vm_data, vcpu_data[n]) offsetof(struct kvm_vm_data, vcpu_data[n]))
#define VM_BASE KVM_VM_DATA_BASE + \ #define KVM_VM_BASE (KVM_VM_DATA_BASE + \
offsetof(struct kvm_vm_data, kvm_vm_struct) offsetof(struct kvm_vm_data, kvm_vm_struct))
#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \ #define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \
offsetof(struct kvm_vm_data, kvm_mem_dirty_log) offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
...@@ -464,6 +464,8 @@ struct kvm_arch { ...@@ -464,6 +464,8 @@ struct kvm_arch {
unsigned long metaphysical_rr4; unsigned long metaphysical_rr4;
unsigned long vmm_init_rr; unsigned long vmm_init_rr;
int online_vcpus;
struct kvm_ioapic *vioapic; struct kvm_ioapic *vioapic;
struct kvm_vm_stat stat; struct kvm_vm_stat stat;
struct kvm_sal_data rdv_sal_data; struct kvm_sal_data rdv_sal_data;
......
...@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, ...@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
union ia64_lid lid; union ia64_lid lid;
int i; int i;
for (i = 0; i < KVM_MAX_VCPUS; i++) { for (i = 0; i < kvm->arch.online_vcpus; i++) {
if (kvm->vcpus[i]) { if (kvm->vcpus[i]) {
lid.val = VCPU_LID(kvm->vcpus[i]); lid.val = VCPU_LID(kvm->vcpus[i]);
if (lid.id == id && lid.eid == eid) if (lid.id == id && lid.eid == eid)
...@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
call_data.ptc_g_data = p->u.ptc_g_data; call_data.ptc_g_data = p->u.ptc_g_data;
for (i = 0; i < KVM_MAX_VCPUS; i++) { for (i = 0; i < kvm->arch.online_vcpus; i++) {
if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
KVM_MP_STATE_UNINITIALIZED || KVM_MP_STATE_UNINITIALIZED ||
vcpu == kvm->vcpus[i]) vcpu == kvm->vcpus[i])
...@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void) ...@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
kvm_init_vm(kvm); kvm_init_vm(kvm);
kvm->arch.online_vcpus = 0;
return kvm; return kvm;
} }
...@@ -1154,7 +1156,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) ...@@ -1154,7 +1156,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Initialize itc offset for vcpus*/ /*Initialize itc offset for vcpus*/
itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
for (i = 0; i < KVM_MAX_VCPUS; i++) { for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu + v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i); sizeof(struct kvm_vcpu_data) * i);
v->arch.itc_offset = itc_offset; v->arch.itc_offset = itc_offset;
...@@ -1288,6 +1290,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, ...@@ -1288,6 +1290,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
goto fail; goto fail;
} }
kvm->arch.online_vcpus++;
return vcpu; return vcpu;
fail: fail:
return ERR_PTR(r); return ERR_PTR(r);
...@@ -1828,7 +1832,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, ...@@ -1828,7 +1832,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
struct kvm_vcpu *lvcpu = kvm->vcpus[0]; struct kvm_vcpu *lvcpu = kvm->vcpus[0];
int i; int i;
for (i = 1; i < KVM_MAX_VCPUS; i++) { for (i = 1; i < kvm->arch.online_vcpus; i++) {
if (!kvm->vcpus[i]) if (!kvm->vcpus[i])
continue; continue;
if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
......
...@@ -807,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); ...@@ -807,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{ {
struct kvm_vcpu *v; struct kvm_vcpu *v;
struct kvm *kvm;
int i; int i;
long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC); long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
unsigned long vitv = VCPU(vcpu, itv); unsigned long vitv = VCPU(vcpu, itv);
kvm = (struct kvm *)KVM_VM_BASE;
if (vcpu->vcpu_id == 0) { if (vcpu->vcpu_id == 0) {
for (i = 0; i < KVM_MAX_VCPUS; i++) { for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu + v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i); sizeof(struct kvm_vcpu_data) * i);
VMX(v, itc_offset) = itc_offset; VMX(v, itc_offset) = itc_offset;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment