Commit ad9241f2 authored by Jim Mattson, committed by Greg Kroah-Hartman

kvm: Change offset in kvm_write_guest_offset_cached to unsigned

[ Upstream commit 7a86dab8 ]

Since the offset is added directly to the hva from the
gfn_to_hva_cache, a negative offset could result in an out of bounds
write. The existing BUG_ON only checks for addresses beyond the end of
the gfn_to_hva_cache, not for addresses before the start of the
gfn_to_hva_cache.

Note that all current call sites have non-negative offsets.
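
For illustration, a minimal userspace sketch follows. It is not the kernel code; it only models the end-of-range comparison described above, and it assumes the kernel's LP64 ABI (32-bit int, 64-bit unsigned long). With a signed offset, a call such as offset = -16 with len = 32 wraps the sum back into range and slips past the check; with an unsigned offset the same call is rejected.

#include <stdio.h>

/* Sketch only: models a "len + offset > cache_len" style check, not the
 * actual kvm_write_guest_offset_cached() implementation. */

/* Old shape: signed offset. */
static int old_check_passes(int offset, unsigned long len, unsigned long cache_len)
{
	/* offset is converted to unsigned long; when len > -offset the sum
	 * wraps back to a small value and the check passes, even though the
	 * write would begin before the start of the cached region. */
	return len + offset <= cache_len;
}

/* New shape: unsigned offset, as in this patch. */
static int new_check_passes(unsigned int offset, unsigned long len,
			    unsigned long cache_len)
{
	/* A "negative" offset now arrives as a huge unsigned value, so the
	 * same comparison rejects it. */
	return len + offset <= cache_len;
}

int main(void)
{
	unsigned long cache_len = 64;

	/* -16 + 32 wraps to 16 <= 64: the old check passes, yet 16 of the
	 * 32 bytes would land before the cache. */
	printf("signed offset:   %s\n",
	       old_check_passes(-16, 32, cache_len) ? "passes" : "caught");

	/* -16 becomes 4294967280 as unsigned int, far beyond cache_len. */
	printf("unsigned offset: %s\n",
	       new_check_passes(-16, 32, cache_len) ? "passes" : "caught");
	return 0;
}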

Fixes: 4ec6e863 ("kvm: Introduce kvm_write_guest_offset_cached()")
Reported-by: Cfir Cohen <cfir@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Cfir Cohen <cfir@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent fc090081
include/linux/kvm_host.h
@@ -694,7 +694,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-				  void *data, int offset, unsigned long len);
+				  void *data, unsigned int offset,
+				  unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
virt/kvm/kvm_main.c
@@ -1959,7 +1959,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-				  void *data, int offset, unsigned long len)
+				  void *data, unsigned int offset,
+				  unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
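
As a usage note, existing callers are unaffected by the signature change: their offsets are derived with offsetof(), which is non-negative by construction. The sketch below shows that pattern; the struct and helper names are hypothetical, not taken from this commit or from the kernel tree.

#include <stddef.h>

/* Opaque kernel types, as a caller sees them. */
struct kvm;
struct gfn_to_hva_cache;

int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);

/* Illustrative guest-shared layout (hypothetical). */
struct guest_record {
	unsigned long flags;
	unsigned char preempted;
};

/* Hypothetical helper: writes one field of the cached guest structure.
 * offsetof() yields a size_t, so the offset is non-negative and fits the
 * new unsigned int parameter with no change at the call site. */
static int mark_preempted(struct kvm *kvm, struct gfn_to_hva_cache *ghc)
{
	unsigned char val = 1;

	return kvm_write_guest_offset_cached(kvm, ghc, &val,
					     offsetof(struct guest_record, preempted),
					     sizeof(val));
}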