Commit 269e9552 authored by Hamza Mahfooz, committed by Paolo Bonzini

KVM: const-ify all relevant uses of struct kvm_memory_slot

As alluded to in commit f36f3f28 ("KVM: add "new" argument to
kvm_arch_commit_memory_region"), a number of other places where struct
kvm_memory_slot is used need to be refactored to preserve the
"const"ness of struct kvm_memory_slot across the board.
Signed-off-by: Hamza Mahfooz <someguy@effective-light.com>
Message-Id: <20210713023338.57108-1-someguy@effective-light.com>
[Do not touch body of slot_rmap_walk_init. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 071064f1
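Before the diff, here is a minimal standalone sketch of the pattern the patch applies: helpers that only read from a memory slot take a pointer-to-const, so const-ness propagates from callers down the call chain and the casts flagged by the old FIXME comments become unnecessary. This is plain C for illustration only; the struct layout and the names (memory_slot, slot_last_gfn, print_slot_range) are invented, not the kernel's definitions.

/*
 * Illustration only: a read-only helper declared with a const slot
 * parameter can be reached from callers that themselves hold a
 * const pointer, with no cast anywhere in the chain.
 */
#include <stdio.h>

struct memory_slot {
	unsigned long base_gfn;
	unsigned long npages;
};

/* Read-only helper: takes const, so any caller with a const slot may use it. */
static unsigned long slot_last_gfn(const struct memory_slot *slot)
{
	return slot->base_gfn + slot->npages - 1;
}

/*
 * Caller that itself receives a const slot. If slot_last_gfn() took a
 * non-const pointer, this call would need a cast that discards const,
 * which is the pattern the commit removes.
 */
static void print_slot_range(const struct memory_slot *slot)
{
	printf("gfns %lu-%lu\n", slot->base_gfn, slot_last_gfn(slot));
}

int main(void)
{
	const struct memory_slot slot = { .base_gfn = 0x1000, .npages = 512 };

	print_slot_range(&slot);
	return 0;
}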
@@ -1537,12 +1537,12 @@ void kvm_mmu_uninit_vm(struct kvm *kvm);
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
+				      const struct kvm_memory_slot *memslot,
 				      int start_level);
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				   const struct kvm_memory_slot *memslot);
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
-				   struct kvm_memory_slot *memslot);
+				   const struct kvm_memory_slot *memslot);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
@@ -794,7 +794,7 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	return &slot->arch.lpage_info[level - 2][idx];
 }
 
-static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
+static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
 					    gfn_t gfn, int count)
 {
 	struct kvm_lpage_info *linfo;
@@ -807,12 +807,12 @@ static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
 	}
 }
 
-void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	update_gfn_disallow_lpage_count(slot, gfn, 1);
 }
 
-void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	update_gfn_disallow_lpage_count(slot, gfn, -1);
 }
@@ -999,7 +999,7 @@ static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
 }
 
 static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
-					   struct kvm_memory_slot *slot)
+					   const struct kvm_memory_slot *slot)
 {
 	unsigned long idx;
@@ -1228,7 +1228,7 @@ static bool spte_wrprot_for_clear_dirty(u64 *sptep)
  * Returns true iff any D or W bits were cleared.
  */
 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			       struct kvm_memory_slot *slot)
+			       const struct kvm_memory_slot *slot)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1387,7 +1387,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 }
 
 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			  struct kvm_memory_slot *slot)
+			  const struct kvm_memory_slot *slot)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1452,7 +1452,7 @@ static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 struct slot_rmap_walk_iterator {
 	/* input fields. */
-	struct kvm_memory_slot *slot;
+	const struct kvm_memory_slot *slot;
 	gfn_t start_gfn;
 	gfn_t end_gfn;
 	int start_level;
@@ -1479,7 +1479,7 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
 static void
 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
-		    struct kvm_memory_slot *slot, int start_level,
+		    const struct kvm_memory_slot *slot, int start_level,
 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
 {
 	iterator->slot = slot;
@@ -5313,12 +5313,13 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
 
 /* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-				    struct kvm_memory_slot *slot);
+typedef bool (*slot_level_handler) (struct kvm *kvm,
+				    struct kvm_rmap_head *rmap_head,
+				    const struct kvm_memory_slot *slot);
 
 /* The caller should hold mmu-lock before calling this function. */
 static __always_inline bool
-slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
 			slot_level_handler fn, int start_level, int end_level,
 			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
 			bool flush)
@@ -5345,7 +5346,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 }
 
 static __always_inline bool
-slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
 		  slot_level_handler fn, int start_level, int end_level,
 		  bool flush_on_yield)
 {
@@ -5356,7 +5357,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 }
 
 static __always_inline bool
-slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot,
 		 slot_level_handler fn, bool flush_on_yield)
 {
 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
@@ -5615,7 +5616,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 			if (start >= end)
 				continue;
 
-			flush = slot_handle_level_range(kvm, memslot,
+			flush = slot_handle_level_range(kvm,
+					(const struct kvm_memory_slot *) memslot,
 					kvm_zap_rmapp, PG_LEVEL_4K,
 					KVM_MAX_HUGEPAGE_LEVEL, start,
 					end - 1, true, flush);
@@ -5643,13 +5645,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 static bool slot_rmap_write_protect(struct kvm *kvm,
 				    struct kvm_rmap_head *rmap_head,
-				    struct kvm_memory_slot *slot)
+				    const struct kvm_memory_slot *slot)
 {
 	return __rmap_write_protect(kvm, rmap_head, false);
 }
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
+				      const struct kvm_memory_slot *memslot,
 				      int start_level)
 {
 	bool flush = false;
@@ -5685,7 +5687,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 					 struct kvm_rmap_head *rmap_head,
-					 struct kvm_memory_slot *slot)
+					 const struct kvm_memory_slot *slot)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -5724,10 +5726,8 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 }
 
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				   const struct kvm_memory_slot *memslot)
+				   const struct kvm_memory_slot *slot)
 {
-	/* FIXME: const-ify all uses of struct kvm_memory_slot. */
-	struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
 	bool flush = false;
 
 	if (kvm_memslots_have_rmaps(kvm)) {
@@ -5763,7 +5763,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 }
 
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
-				   struct kvm_memory_slot *memslot)
+				   const struct kvm_memory_slot *memslot)
 {
 	bool flush = false;
@@ -124,8 +124,8 @@ static inline bool is_nx_huge_page_enabled(void)
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);
 
-void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn,
 				    int min_level);
@@ -1246,8 +1246,8 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * only affect leaf SPTEs down to min_level.
  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
  */
-bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			     int min_level)
+bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
+			     const struct kvm_memory_slot *slot, int min_level)
 {
 	struct kvm_mmu_page *root;
 	bool spte_set = false;
@@ -1317,7 +1317,8 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
-bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
+bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
+				  const struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;
 	bool spte_set = false;
@@ -61,10 +61,10 @@ bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 
-bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			     int min_level);
+bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
+			     const struct kvm_memory_slot *slot, int min_level);
 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
-				  struct kvm_memory_slot *slot);
+				  const struct kvm_memory_slot *slot);
 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
 					gfn_t gfn, unsigned long mask,
@@ -11520,7 +11520,7 @@ static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 				     struct kvm_memory_slot *old,
-				     struct kvm_memory_slot *new,
+				     const struct kvm_memory_slot *new,
 				     enum kvm_mr_change change)
 {
 	bool log_dirty_pages = new->flags & KVM_MEM_LOG_DIRTY_PAGES;
@@ -11600,10 +11600,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 		kvm_mmu_change_mmu_pages(kvm,
 				kvm_mmu_calculate_default_mmu_pages(kvm));
 
-	/*
-	 * FIXME: const-ify all uses of struct kvm_memory_slot.
-	 */
-	kvm_mmu_slot_apply_flags(kvm, old, (struct kvm_memory_slot *) new, change);
+	kvm_mmu_slot_apply_flags(kvm, old, new, change);
 
 	/* Free the arrays associated with the old memslot. */
 	if (change == KVM_MR_MOVE)
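As a closing illustration of why the slot_level_handler typedef and its handlers change together in the diff above: a function pointer's parameter types must match the typedef exactly, so once the generic walker passes handlers a const slot, every handler has to accept const as well. The sketch below is standalone C with invented names (memory_slot, slot_handler, walk_slot, report_slot), not the kernel's actual types.

/*
 * Illustration only: a callback typedef with a const slot parameter
 * forces every registered handler to take const too, which is how
 * const-ness stays intact through slot_handle_level_range()-style walkers.
 */
#include <stdbool.h>
#include <stdio.h>

struct memory_slot {
	unsigned long base_gfn;
	unsigned long npages;
};

/* Callback type: handlers get a read-only view of the slot. */
typedef bool (*slot_handler)(const struct memory_slot *slot);

/* A handler matching the const-ified callback type. */
static bool report_slot(const struct memory_slot *slot)
{
	printf("slot at gfn %lu, %lu pages\n", slot->base_gfn, slot->npages);
	return false;
}

/* Generic walker: its own callers may now hand it a const slot. */
static bool walk_slot(const struct memory_slot *slot, slot_handler fn)
{
	return fn(slot);
}

int main(void)
{
	const struct memory_slot slot = { .base_gfn = 0x2000, .npages = 64 };

	walk_slot(&slot, report_slot);
	return 0;
}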