Commit 8fd75e12 authored by Cong Wang, committed by Cong Wang

x86: remove the second argument of k[un]map_atomic()

Acked-by: Avi Kivity <avi@redhat.com>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Cong Wang <amwang@redhat.com>
parent 91f23593
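
The whole patch is one mechanical conversion: kmap_atomic()/kunmap_atomic(), and the scatterwalk_map()/scatterwalk_unmap() helpers built on top of them, drop their second argument, which existed only to name a KM_* atomic-kmap slot that the kmap_atomic() implementation no longer uses. A minimal sketch of the pattern applied throughout the hunks below (the variable names are illustrative, not lines taken from this commit):

         /* illustrative fragment -- vaddr, page, buf, offset, len are not from this commit */
    -    vaddr = kmap_atomic(page, KM_USER0);
    +    vaddr = kmap_atomic(page);
         memcpy(buf, vaddr + offset, len);
    -    kunmap_atomic(vaddr, KM_USER0);
    +    kunmap_atomic(vaddr);
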
@@ -1107,12 +1107,12 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
                 one_entry_in_sg = 1;
                 scatterwalk_start(&src_sg_walk, req->src);
                 scatterwalk_start(&assoc_sg_walk, req->assoc);
-                src = scatterwalk_map(&src_sg_walk, 0);
-                assoc = scatterwalk_map(&assoc_sg_walk, 0);
+                src = scatterwalk_map(&src_sg_walk);
+                assoc = scatterwalk_map(&assoc_sg_walk);
                 dst = src;
                 if (unlikely(req->src != req->dst)) {
                         scatterwalk_start(&dst_sg_walk, req->dst);
-                        dst = scatterwalk_map(&dst_sg_walk, 0);
+                        dst = scatterwalk_map(&dst_sg_walk);
                 }
         } else {
@@ -1136,11 +1136,11 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
          * back to the packet. */
         if (one_entry_in_sg) {
                 if (unlikely(req->src != req->dst)) {
-                        scatterwalk_unmap(dst, 0);
+                        scatterwalk_unmap(dst);
                         scatterwalk_done(&dst_sg_walk, 0, 0);
                 }
-                scatterwalk_unmap(src, 0);
-                scatterwalk_unmap(assoc, 0);
+                scatterwalk_unmap(src);
+                scatterwalk_unmap(assoc);
                 scatterwalk_done(&src_sg_walk, 0, 0);
                 scatterwalk_done(&assoc_sg_walk, 0, 0);
         } else {
@@ -1189,12 +1189,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
                 one_entry_in_sg = 1;
                 scatterwalk_start(&src_sg_walk, req->src);
                 scatterwalk_start(&assoc_sg_walk, req->assoc);
-                src = scatterwalk_map(&src_sg_walk, 0);
-                assoc = scatterwalk_map(&assoc_sg_walk, 0);
+                src = scatterwalk_map(&src_sg_walk);
+                assoc = scatterwalk_map(&assoc_sg_walk);
                 dst = src;
                 if (unlikely(req->src != req->dst)) {
                         scatterwalk_start(&dst_sg_walk, req->dst);
-                        dst = scatterwalk_map(&dst_sg_walk, 0);
+                        dst = scatterwalk_map(&dst_sg_walk);
                 }
         } else {
@@ -1219,11 +1219,11 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
         if (one_entry_in_sg) {
                 if (unlikely(req->src != req->dst)) {
-                        scatterwalk_unmap(dst, 0);
+                        scatterwalk_unmap(dst);
                         scatterwalk_done(&dst_sg_walk, 0, 0);
                 }
-                scatterwalk_unmap(src, 0);
-                scatterwalk_unmap(assoc, 0);
+                scatterwalk_unmap(src);
+                scatterwalk_unmap(assoc);
                 scatterwalk_done(&src_sg_walk, 0, 0);
                 scatterwalk_done(&assoc_sg_walk, 0, 0);
         } else {

@@ -62,16 +62,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
         if (!userbuf) {
                 memcpy(buf, (vaddr + offset), csize);
-                kunmap_atomic(vaddr, KM_PTE0);
+                kunmap_atomic(vaddr);
         } else {
                 if (!kdump_buf_page) {
                         printk(KERN_WARNING "Kdump: Kdump buffer page not"
                                 " allocated\n");
-                        kunmap_atomic(vaddr, KM_PTE0);
+                        kunmap_atomic(vaddr);
                         return -EFAULT;
                 }
                 copy_page(kdump_buf_page, vaddr);
-                kunmap_atomic(vaddr, KM_PTE0);
+                kunmap_atomic(vaddr);
                 if (copy_to_user(buf, (kdump_buf_page + offset), csize))
                         return -EFAULT;
         }

@@ -1283,9 +1283,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
         if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
                 return;
-        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+        vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
         data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
-        kunmap_atomic(vapic, KM_USER0);
+        kunmap_atomic(vapic);
         apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
@@ -1310,9 +1310,9 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
                 max_isr = 0;
         data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
-        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+        vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
         *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
-        kunmap_atomic(vapic, KM_USER0);
+        kunmap_atomic(vapic);
 }
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)

@@ -92,9 +92,9 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
         if (unlikely(npages != 1))
                 return -EFAULT;
-        table = kmap_atomic(page, KM_USER0);
+        table = kmap_atomic(page);
         ret = CMPXCHG(&table[index], orig_pte, new_pte);
-        kunmap_atomic(table, KM_USER0);
+        kunmap_atomic(table);
         kvm_release_page_dirty(page);

@@ -1162,12 +1162,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
          */
         vcpu->hv_clock.version += 2;
-        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+        shared_kaddr = kmap_atomic(vcpu->time_page);
         memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
                 sizeof(vcpu->hv_clock));
-        kunmap_atomic(shared_kaddr, KM_USER0);
+        kunmap_atomic(shared_kaddr);
         mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
         return 0;
@@ -3848,7 +3848,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
                 goto emul_write;
         }
-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         kaddr += offset_in_page(gpa);
         switch (bytes) {
         case 1:
@@ -3866,7 +3866,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
         default:
                 BUG();
         }
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
         kvm_release_page_dirty(page);
         if (!exchanged)

@@ -760,9 +760,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
                         break;
                 }
-                maddr = kmap_atomic(pg, KM_USER0);
+                maddr = kmap_atomic(pg);
                 memcpy(maddr + offset, from, len);
-                kunmap_atomic(maddr, KM_USER0);
+                kunmap_atomic(maddr);
                 set_page_dirty_lock(pg);
                 put_page(pg);
                 up_read(&current->mm->mmap_sem);