Commit 3f3f78b6 authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: MTRR: improve kvm_mtrr_get_guest_memory_type

 - kvm_mtrr_get_guest_memory_type() only looks up a single page in the
   MTRRs, so there is no need to check whether the range is partially
   covered by an MTRR

 - optimize the check for overlapping memory types and add comments
   explaining the precedence rules (a standalone sketch of these rules
   follows the diff below)
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 86fd5270
@@ -249,24 +249,22 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	return 0;
 }
 
-/*
- * The function is based on mtrr_type_lookup() in
- * arch/x86/kernel/cpu/mtrr/generic.c
- */
-static int get_mtrr_type(struct kvm_mtrr *mtrr_state,
-			 u64 start, u64 end)
+u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	u64 base, mask;
-	u8 prev_match, curr_match;
-	int i, num_var_ranges = KVM_NR_VAR_MTRR;
+	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
+	u64 base, mask, start;
+	int i, num_var_ranges, type;
+	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
+			       | (1 << MTRR_TYPE_WRTHROUGH);
+
+	start = gfn_to_gpa(gfn);
+	num_var_ranges = KVM_NR_VAR_MTRR;
+	type = -1;
 
 	/* MTRR is completely disabled, use UC for all of physical memory. */
 	if (!mtrr_is_enabled(mtrr_state))
 		return MTRR_TYPE_UNCACHABLE;
 
-	/* Make end inclusive end, instead of exclusive */
-	end--;
-
 	/* Look in fixed ranges. Just return the type as per start */
 	if (fixed_mtrr_is_enabled(mtrr_state) && (start < 0x100000)) {
 		int idx;
@@ -291,9 +289,8 @@ static int get_mtrr_type(struct kvm_mtrr *mtrr_state,
 	 * Look of multiple ranges matching this address and pick type
 	 * as per MTRR precedence
 	 */
-	prev_match = 0xFF;
 	for (i = 0; i < num_var_ranges; ++i) {
-		unsigned short start_state, end_state;
+		int curr_type;
 
 		if (!(mtrr_state->var_ranges[i].mask & (1 << 11)))
 			continue;
@@ -301,50 +298,57 @@ static int get_mtrr_type(struct kvm_mtrr *mtrr_state,
 		base = mtrr_state->var_ranges[i].base & PAGE_MASK;
 		mask = mtrr_state->var_ranges[i].mask & PAGE_MASK;
 
-		start_state = ((start & mask) == (base & mask));
-		end_state = ((end & mask) == (base & mask));
-		if (start_state != end_state)
-			return 0xFE;
-
 		if ((start & mask) != (base & mask))
 			continue;
 
-		curr_match = mtrr_state->var_ranges[i].base & 0xff;
-		if (prev_match == 0xFF) {
-			prev_match = curr_match;
+		/*
+		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
+		 * Precedences.
+		 */
+
+		curr_type = mtrr_state->var_ranges[i].base & 0xff;
+		if (type == -1) {
+			type = curr_type;
 			continue;
 		}
 
-		if (prev_match == MTRR_TYPE_UNCACHABLE ||
-		    curr_match == MTRR_TYPE_UNCACHABLE)
+		/*
+		 * If two or more variable memory ranges match and the
+		 * memory types are identical, then that memory type is
+		 * used.
+		 */
+		if (type == curr_type)
+			continue;
+
+		/*
+		 * If two or more variable memory ranges match and one of
+		 * the memory types is UC, the UC memory type used.
+		 */
+		if (curr_type == MTRR_TYPE_UNCACHABLE)
 			return MTRR_TYPE_UNCACHABLE;
 
-		if ((prev_match == MTRR_TYPE_WRBACK &&
-		     curr_match == MTRR_TYPE_WRTHROUGH) ||
-		    (prev_match == MTRR_TYPE_WRTHROUGH &&
-		     curr_match == MTRR_TYPE_WRBACK)) {
-			prev_match = MTRR_TYPE_WRTHROUGH;
-			curr_match = MTRR_TYPE_WRTHROUGH;
+		/*
+		 * If two or more variable memory ranges match and the
+		 * memory types are WT and WB, the WT memory type is used.
+		 */
+		if (((1 << type) & wt_wb_mask) &&
+		    ((1 << curr_type) & wt_wb_mask)) {
+			type = MTRR_TYPE_WRTHROUGH;
+			continue;
 		}
 
-		if (prev_match != curr_match)
-			return MTRR_TYPE_UNCACHABLE;
+		/*
+		 * For overlaps not defined by the above rules, processor
+		 * behavior is undefined.
+		 */
+
+		/* We use WB for this undefined behavior. :( */
+		return MTRR_TYPE_WRBACK;
 	}
 
-	if (prev_match != 0xFF)
-		return prev_match;
+	if (type != -1)
+		return type;
 
 	return mtrr_default_type(mtrr_state);
 }
 
-u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	u8 mtrr;
-
-	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
-			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
-	if (mtrr == 0xfe || mtrr == 0xff)
-		mtrr = MTRR_TYPE_WRBACK;
-	return mtrr;
-}
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
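The loop above resolves overlapping variable-range MTRRs per Intel SDM
Volume 3, Section 11.11.4.1 (MTRR Precedences). As a rough illustration,
here is a minimal, standalone C sketch of the same type-combining rules;
the helper combine_mtrr_types() and the self-test in main() are our own
additions for illustration, not part of the patch:

#include <stdio.h>

/* Memory type encodings, as in the Intel SDM and the kernel's MTRR headers. */
#define MTRR_TYPE_UNCACHABLE	0
#define MTRR_TYPE_WRTHROUGH	4
#define MTRR_TYPE_WRBACK	6

/*
 * Combine the memory type accumulated so far ("type") with the type of
 * another matching variable range ("curr_type"):
 *   - identical types: keep that type
 *   - one of them is UC: UC wins
 *   - WT and WB: WT wins
 *   - any other overlap is architecturally undefined; like the patch,
 *     fall back to WB
 */
static int combine_mtrr_types(int type, int curr_type)
{
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK) |
			       (1 << MTRR_TYPE_WRTHROUGH);

	if (type == curr_type)
		return type;

	if (type == MTRR_TYPE_UNCACHABLE || curr_type == MTRR_TYPE_UNCACHABLE)
		return MTRR_TYPE_UNCACHABLE;

	if (((1 << type) & wt_wb_mask) && ((1 << curr_type) & wt_wb_mask))
		return MTRR_TYPE_WRTHROUGH;

	return MTRR_TYPE_WRBACK;	/* undefined overlap: use WB */
}

int main(void)
{
	/* WB + WT -> WT (4), WB + UC -> UC (0), WB + WB -> WB (6) */
	printf("%d\n", combine_mtrr_types(MTRR_TYPE_WRBACK, MTRR_TYPE_WRTHROUGH));
	printf("%d\n", combine_mtrr_types(MTRR_TYPE_WRBACK, MTRR_TYPE_UNCACHABLE));
	printf("%d\n", combine_mtrr_types(MTRR_TYPE_WRBACK, MTRR_TYPE_WRBACK));
	return 0;
}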