Commit e64fb7e2 authored by Suraj Jitindar Singh, committed by Paul Mackerras

KVM: PPC: Book3S HV: Move struct kvmppc_vcore from kvm_host.h to kvm_book3s.h

The next commit will introduce a member to the kvmppc_vcore struct which
references MAX_SMT_THREADS, defined in kvm_book3s_asm.h; however, that
file isn't included by kvm_host.h directly. Thus compiling with KVM for
certain platforms, such as pmac32_defconfig and ppc64e_defconfig, fails
because MAX_SMT_THREADS is not defined.

Move the struct kvmppc_vcore definition to kvm_book3s.h, which explicitly
includes kvm_book3s_asm.h.
Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 4b3d173d
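
To make the failure mode concrete, here is a minimal sketch of the include
dependency (the array member and its name are hypothetical, standing in for
whatever the next commit adds; it assumes MAX_SMT_THREADS comes only from
kvm_book3s_asm.h):

	/* arch/powerpc/include/asm/kvm_book3s_asm.h provides the constant: */
	#define MAX_SMT_THREADS	8

	/* While the struct lives in kvm_host.h, which never includes that
	 * header, adding a member sized by it breaks configs that don't pull
	 * it in some other way (e.g. pmac32_defconfig, ppc64e_defconfig):
	 */
	struct kvmppc_vcore {
		/* ... existing members ... */
		struct kvm_vcpu *threads[MAX_SMT_THREADS];	/* hypothetical member */
	};

	/* kvm_book3s.h, the struct's new home, includes the header explicitly: */
	#include <asm/kvm_book3s_asm.h>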
arch/powerpc/include/asm/kvm_book3s.h
@@ -69,6 +69,41 @@ struct hpte_cache {
 	int pagesize;
 };
 
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
+ */
+struct kvmppc_vcore {
+	int n_runnable;
+	int num_threads;
+	int entry_exit_map;
+	int napping_threads;
+	int first_vcpuid;
+	u16 pcpu;
+	u16 last_cpu;
+	u8 vcore_state;
+	u8 in_guest;
+	struct kvmppc_vcore *master_vcore;
+	struct list_head runnable_threads;
+	struct list_head preempt_list;
+	spinlock_t lock;
+	struct swait_queue_head wq;
+	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
+	u64 stolen_tb;
+	u64 preempt_tb;
+	struct kvm_vcpu *runner;
+	struct kvm *kvm;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
+	ulong dpdes;		/* doorbell state (POWER8) */
+	ulong conferring_threads;
+};
+
 struct kvmppc_vcpu_book3s {
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
arch/powerpc/include/asm/kvm_host.h
@@ -277,41 +277,6 @@ struct kvm_arch {
 #endif
 };
 
-/*
- * Struct for a virtual core.
- * Note: entry_exit_map combines a bitmap of threads that have entered
- * in the bottom 8 bits and a bitmap of threads that have exited in the
- * next 8 bits. This is so that we can atomically set the entry bit
- * iff the exit map is 0 without taking a lock.
- */
-struct kvmppc_vcore {
-	int n_runnable;
-	int num_threads;
-	int entry_exit_map;
-	int napping_threads;
-	int first_vcpuid;
-	u16 pcpu;
-	u16 last_cpu;
-	u8 vcore_state;
-	u8 in_guest;
-	struct kvmppc_vcore *master_vcore;
-	struct list_head runnable_threads;
-	struct list_head preempt_list;
-	spinlock_t lock;
-	struct swait_queue_head wq;
-	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
-	u64 stolen_tb;
-	u64 preempt_tb;
-	struct kvm_vcpu *runner;
-	struct kvm *kvm;
-	u64 tb_offset;		/* guest timebase - host timebase */
-	ulong lpcr;
-	u32 arch_compat;
-	ulong pcr;
-	ulong dpdes;		/* doorbell state (POWER8) */
-	ulong conferring_threads;
-};
-
 #define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
 #define VCORE_EXIT_MAP(vc)	((vc)->entry_exit_map >> 8)
 #define VCORE_IS_EXITING(vc)	(VCORE_EXIT_MAP(vc) != 0)
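
The entry_exit_map layout that the struct comment and the VCORE_* macros
describe enables lock-free entry: a thread may set its entry bit only while
the exit half of the word is still zero. The kernel's real entry path does
this in assembly (book3s_hv_rmhandlers.S); the following is a minimal
userspace C sketch of the same idea, assuming C11 atomics, with a
hypothetical try_enter_vcore() helper:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Sketch only: mirrors VCORE_ENTRY_MAP/VCORE_EXIT_MAP above, with
	 * entry bits in bits 0-7 and exit bits in bits 8-15 of one word. */
	static bool try_enter_vcore(atomic_int *entry_exit_map, int thread)
	{
		int old = atomic_load(entry_exit_map);

		do {
			if (old >> 8)		/* a thread has started exiting... */
				return false;	/* ...so we are too late to enter */
		} while (!atomic_compare_exchange_weak(entry_exit_map, &old,
						       old | (1 << thread)));
		return true;	/* our entry bit is set; no lock was taken */
	}

Once any exit bit becomes visible (VCORE_IS_EXITING()), the compare-and-swap
guarantees no further entry bit can be set, so exiting threads can rely on
the set of entered threads no longer growing.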