Commit a8d908b5 authored by Paolo Bonzini

KVM: x86: report sev_pin_memory errors with PTR_ERR

Callers of sev_pin_memory() treat NULL differently:

sev_launch_secret()/svm_register_enc_region() return -ENOMEM
sev_dbg_crypt() returns -EFAULT.

Switching to ERR_PTR() preserves the error and enables cleaner reporting of
different kinds of failures.
Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent dc42c8ae
...@@ -320,7 +320,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, ...@@ -320,7 +320,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
unsigned long first, last; unsigned long first, last;
if (ulen == 0 || uaddr + ulen < uaddr) if (ulen == 0 || uaddr + ulen < uaddr)
return NULL; return ERR_PTR(-EINVAL);
/* Calculate number of pages. */ /* Calculate number of pages. */
first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
...@@ -331,11 +331,11 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, ...@@ -331,11 +331,11 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit); pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
return NULL; return ERR_PTR(-ENOMEM);
} }
if (WARN_ON_ONCE(npages > INT_MAX)) if (WARN_ON_ONCE(npages > INT_MAX))
return NULL; return ERR_PTR(-EINVAL);
/* Avoid using vmalloc for smaller buffers. */ /* Avoid using vmalloc for smaller buffers. */
size = npages * sizeof(struct page *); size = npages * sizeof(struct page *);
...@@ -345,7 +345,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, ...@@ -345,7 +345,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
pages = kmalloc(size, GFP_KERNEL_ACCOUNT); pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
if (!pages) if (!pages)
return NULL; return ERR_PTR(-ENOMEM);
/* Pin the user virtual address. */ /* Pin the user virtual address. */
npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
...@@ -360,11 +360,13 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, ...@@ -360,11 +360,13 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
return pages; return pages;
err: err:
if (npinned > 0) if (npinned > 0) {
unpin_user_pages(pages, npinned); unpin_user_pages(pages, npinned);
npinned = -ENOMEM;
}
kvfree(pages); kvfree(pages);
return NULL; return ERR_PTR(npinned);
} }
static void sev_unpin_memory(struct kvm *kvm, struct page **pages, static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
...@@ -864,8 +866,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) ...@@ -864,8 +866,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
return -EFAULT; return -EFAULT;
pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
if (!pages) if (IS_ERR(pages))
return -ENOMEM; return PTR_ERR(pages);
/* /*
* The secret must be copied into contiguous memory region, lets verify * The secret must be copied into contiguous memory region, lets verify
...@@ -991,8 +993,8 @@ int svm_register_enc_region(struct kvm *kvm, ...@@ -991,8 +993,8 @@ int svm_register_enc_region(struct kvm *kvm,
return -ENOMEM; return -ENOMEM;
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1); region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
if (!region->pages) { if (IS_ERR(region->pages)) {
ret = -ENOMEM; ret = PTR_ERR(region->pages);
goto e_free; goto e_free;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment