Commit c4342633 authored by Ingo Molnar

x86: Fix leftover comment typos

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6f0d271d
@@ -623,7 +623,7 @@ bool hv_query_ext_cap(u64 cap_query)
  * output parameter to the hypercall below and so it should be
  * compatible with 'virt_to_phys'. Which means, it's address should be
  * directly mapped. Use 'static' to keep it compatible; stack variables
- * can be virtually mapped, making them imcompatible with
+ * can be virtually mapped, making them incompatible with
  * 'virt_to_phys'.
  * Hypercall input/output addresses should also be 8-byte aligned.
  */
......
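The comment fixed above reflects a general Hyper-V constraint: hypercall output
buffers are translated with virt_to_phys(), so they must live in the kernel's
direct mapping and be 8-byte aligned. A minimal sketch of the pattern, assuming
hv_do_hypercall() and the status macros from asm/mshyperv.h (the wrapper name
and capability mask here are hypothetical):

#include <linux/types.h>
#include <asm/mshyperv.h>	/* hv_do_hypercall(), HV_STATUS_SUCCESS */

static bool query_ext_cap_sketch(u64 cap_mask)
{
	/*
	 * 'static' keeps the output buffer in the directly mapped region,
	 * so virt_to_phys() on its address is valid; a stack variable may
	 * be virtually mapped (e.g. with CONFIG_VMAP_STACK) and is not.
	 * Hypercall I/O must also be 8-byte aligned.
	 */
	static u64 ext_caps __aligned(8);
	u64 status;

	status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
				 &ext_caps);
	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
		return false;

	return ext_caps & cap_mask;
}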
@@ -13,7 +13,7 @@
 /*
  * This file contains both data structures defined by SGX architecture and Linux
  * defined software data structures and functions. The two should not be mixed
- * together for better readibility. The architectural definitions come first.
+ * together for better readability. The architectural definitions come first.
  */
 
 /* The SGX specific CPUID function. */
......
@@ -11,7 +11,7 @@
  * The same segment is shared by percpu area and stack canary. On
  * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
  * base of percpu area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at the approproate
+ * fixed_percpu_data which contains stack_canary at the appropriate
  * offset. On x86_32, the stack canary is just a regular percpu
  * variable.
  *
......
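For context on the layout this comment describes: on x86_64, GCC hardcodes the
stack canary at %gs:40, so the first object in the percpu area reserves the 40
bytes below it. A rough sketch of how that is expressed (the real definition
lives in arch/x86/include/asm/processor.h and may differ in detail):

struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40, so reserve the
	 * bottom 40 bytes of the fixed percpu area to make stack_canary
	 * land at the expected offset from the %gs base.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};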
@@ -674,7 +674,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
 	break;
 if (insn->addr_bytes != sizeof(unsigned long))
-	return -EOPNOTSUPP;	/* Don't support differnt size */
+	return -EOPNOTSUPP;	/* Don't support different size */
 if (X86_MODRM_MOD(opcode) != 3)
 	return -EOPNOTSUPP;	/* TODO: support memory addressing */
......
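For reference, the X86_MODRM_MOD() check above rejects memory operands: the
ModRM byte packs mod (bits 7:6), reg (5:3) and rm (2:0), and only mod == 3
encodes a register-direct operand. A small standalone decoder illustrating the
field layout (not kernel code; the kernel uses the helpers in asm/insn.h):

#include <stdint.h>
#include <stdio.h>

/* ModRM layout: mod[7:6] reg[5:3] rm[2:0]; mod == 3 means register-direct. */
static unsigned int modrm_mod(uint8_t m) { return (m >> 6) & 0x3; }
static unsigned int modrm_reg(uint8_t m) { return (m >> 3) & 0x7; }
static unsigned int modrm_rm(uint8_t m)  { return m & 0x7; }

int main(void)
{
	uint8_t modrm = 0xd0;	/* 11 010 000: with opcode 0xff this is call *%rax */

	printf("mod=%u reg=%u rm=%u -> %s operand\n",
	       modrm_mod(modrm), modrm_reg(modrm), modrm_rm(modrm),
	       modrm_mod(modrm) == 3 ? "register" : "memory");
	return 0;
}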
@@ -2374,7 +2374,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
  * page is available, while the caller may end up allocating as many as
  * four pages, e.g. for PAE roots or for 5-level paging. Temporarily
  * exceeding the (arbitrary by default) limit will not harm the host,
- * being too agressive may unnecessarily kill the guest, and getting an
+ * being too aggressive may unnecessarily kill the guest, and getting an
  * exact count is far more trouble than it's worth, especially in the
  * page fault paths.
  */
......
@@ -1017,7 +1017,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 if (!is_shadow_present_pte(iter.old_spte)) {
 	/*
-	 * If SPTE has been forzen by another thread, just
+	 * If SPTE has been frozen by another thread, just
 	 * give up and retry, avoiding unnecessary page table
 	 * allocation and free.
 	 */
......
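The "frozen" SPTE the comment refers to is a sentinel value another vCPU thread
writes into the entry while it tears down or replaces the page table; a mapper
that observes it simply retries rather than allocating a child table it would
immediately have to free. A generic sketch of that back-off pattern with
hypothetical names (not KVM's actual code, which uses its own SPTE helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define FROZEN_PTE ((uint64_t)0x5a0)	/* hypothetical "frozen" sentinel */

/* Install a new entry unless another thread has frozen or changed it. */
static bool try_install_pte(_Atomic uint64_t *ptep, uint64_t new_pte)
{
	uint64_t old = atomic_load(ptep);

	if (old == FROZEN_PTE)
		return false;	/* owner is mid-teardown: give up and retry later */

	/* Publish only if the entry still holds the value read above. */
	return atomic_compare_exchange_strong(ptep, &old, new_pte);
}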