Commit 2de1a7e4 authored by Seth Jennings, committed by Linus Torvalds

mm/swapfile.c: fix comment typos

Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7f88f88f
mm/swapfile.c
@@ -707,7 +707,7 @@ swp_entry_t get_swap_page(void)
 	return (swp_entry_t) {0};
 }
 
-/* The only caller of this function is now susupend routine */
+/* The only caller of this function is now suspend routine */
 swp_entry_t get_swap_page_of_type(int type)
 {
 	struct swap_info_struct *si;
@@ -845,7 +845,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
 }
 
 /*
- * Caller has made sure that the swapdevice corresponding to entry
+ * Caller has made sure that the swap device corresponding to entry
  * is still around or has not been recycled.
  */
 void swap_free(swp_entry_t entry)
@@ -947,7 +947,7 @@ int try_to_free_swap(struct page *page)
 	 * original page might be freed under memory pressure, then
 	 * later read back in from swap, now with the wrong data.
 	 *
-	 * Hibration suspends storage while it is writing the image
+	 * Hibernation suspends storage while it is writing the image
	 * to disk so check that here.
	 */
	if (pm_suspended_storage())
@@ -1179,7 +1179,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
	 * of unmatched parts which look like swp_pte, so unuse_pte must
	 * recheck under pte lock. Scanning without pte lock lets it be
-	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
+	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
	 */
	pte = pte_offset_map(pmd, addr);
	do {
@@ -1934,7 +1934,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
	vfree(swap_map);
	vfree(cluster_info);
	vfree(frontswap_map);
-	/* Destroy swap account informatin */
+	/* Destroy swap account information */
	swap_cgroup_swapoff(type);
 
	inode = mapping->host;
@@ -2786,8 +2786,8 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 
	/*
	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
-	 * no architecture is using highmem pages for kernel pagetables: so it
-	 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
+	 * no architecture is using highmem pages for kernel page tables: so it
+	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
	 */
	head = vmalloc_to_page(si->swap_map + offset);
	offset &= ~PAGE_MASK;