Commit 455716fa authored by Joerg Roedel, committed by Avi Kivity

KVM: SVM: Move msrpm offset calculation to separate function

The algorithm to find the offset in the msrpm for a given
MSR is needed in other places too. Move that logic into its
own function.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent d2477826
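As a reading aid, the following standalone sketch (not kernel code) mirrors the offset calculation that this patch factors out into svm_msrpm_offset(). The constants and ranges are taken from the context visible in the diff below; the helper name msrpm_offset and the use of EFER (0xc0000080) as a worked example are illustrative assumptions.

/*
 * Illustrative sketch only: reproduces the arithmetic of the
 * svm_msrpm_offset() helper added by this patch, outside the kernel.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_INVALID	0xffffffffU
#define MSRS_RANGE_SIZE	2048				/* bytes per MSR range in the bitmap */
#define MSRS_IN_RANGE	(MSRS_RANGE_SIZE * 8 / 2)	/* 2 bits (read/write) per MSR */

static const uint32_t msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
#define NUM_MSR_MAPS	(sizeof(msrpm_ranges) / sizeof(msrpm_ranges[0]))

static uint32_t msrpm_offset(uint32_t msr)
{
	uint32_t offset;
	unsigned int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4;	/* 4 MSRs per byte */
		offset += i * MSRS_RANGE_SIZE;		/* byte offset of the range */

		return offset / 4;			/* byte offset -> u32 index */
	}

	return MSR_INVALID;				/* MSR not covered by the bitmap */
}

int main(void)
{
	uint32_t msr = 0xc0000080;	/* EFER, used here only as a worked example */

	/* Prints 520: EFER lands at byte 2080 of the bitmap, i.e. u32 index 520. */
	printf("u32 offset for MSR 0x%x: %u\n", msr, msrpm_offset(msr));
	return 0;
}

With two bits per MSR, EFER ends up at u32 index 520 with bit 0 controlling the read intercept and bit 1 the write intercept; these are the same values the rewritten set_msr_interception() derives via svm_msrpm_offset() and 2 * (msr & 0x0f).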
@@ -117,6 +117,8 @@ struct vcpu_svm {
 	unsigned long int3_rip;
 };
 
+#define MSR_INVALID			0xffffffffU
+
 /* enable NPT for AMD64 and X86 with PAE */
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static bool npt_enabled = true;
@@ -200,6 +202,27 @@ static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 #define MSRS_RANGE_SIZE 2048
 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
 
+static u32 svm_msrpm_offset(u32 msr)
+{
+	u32 offset;
+	int i;
+
+	for (i = 0; i < NUM_MSR_MAPS; i++) {
+		if (msr < msrpm_ranges[i] ||
+		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
+			continue;
+
+		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
+		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
+
+		/* Now we have the u8 offset - but need the u32 offset */
+		return offset / 4;
+	}
+
+	/* MSR not in any range */
+	return MSR_INVALID;
+}
+
 #define MAX_INST_SIZE 15
 
 static inline u32 svm_has(u32 feat)
@@ -418,23 +441,21 @@ static int svm_cpu_init(int cpu)
 static void set_msr_interception(u32 *msrpm, unsigned msr,
 				 int read, int write)
 {
-	int i;
+	u8 bit_read, bit_write;
+	unsigned long tmp;
+	u32 offset;
 
-	for (i = 0; i < NUM_MSR_MAPS; i++) {
-		if (msr >= msrpm_ranges[i] &&
-		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
-			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
-					  msrpm_ranges[i]) * 2;
+	offset    = svm_msrpm_offset(msr);
+	bit_read  = 2 * (msr & 0x0f);
+	bit_write = 2 * (msr & 0x0f) + 1;
+	tmp       = msrpm[offset];
 
-			u32 *base = msrpm + (msr_offset / 32);
-			u32 msr_shift = msr_offset % 32;
-			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
-			*base = (*base & ~(0x3 << msr_shift)) |
-				(mask << msr_shift);
-			return;
-		}
-	}
-	BUG();
+	BUG_ON(offset == MSR_INVALID);
+
+	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
+	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
+
+	msrpm[offset] = tmp;
 }
 
 static void svm_vcpu_init_msrpm(u32 *msrpm)