Commit 457aef94 authored by Ethon Paul, committed by Linus Torvalds

mm: ksm: fix a typo in comment "alreaady"->"already"

There is a typo in a comment; fix it.
Signed-off-by: Ethon Paul <ethp@qq.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Link: http://lkml.kernel.org/r/20200410162427.13927-1-ethp@qq.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 52cfc245
@@ -612,7 +612,7 @@ static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
 		 * Move the old stable node to the second dimension
 		 * queued in the hlist_dup. The invariant is that all
 		 * dup stable_nodes in the chain->hlist point to pages
-		 * that are wrprotected and have the exact same
+		 * that are write protected and have the exact same
 		 * content.
 		 */
 		stable_node_chain_add_dup(dup, chain);
@@ -1148,7 +1148,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	/*
 	 * No need to check ksm_use_zero_pages here: we can only have a
-	 * zero_page here if ksm_use_zero_pages was enabled alreaady.
+	 * zero_page here if ksm_use_zero_pages was enabled already.
 	 */
 	if (!is_zero_pfn(page_to_pfn(kpage))) {
 		get_page(kpage);
@@ -1608,7 +1608,7 @@ static struct page *stable_tree_search(struct page *page)
 				 * continue. All KSM pages belonging to the
 				 * stable_node dups in a stable_node chain
 				 * have the same content and they're
-				 * wrprotected at all times. Any will work
+				 * write protected at all times. Any will work
 				 * fine to continue the walk.
 				 */
 				tree_page = get_ksm_page(stable_node_any,
@@ -1843,7 +1843,7 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
 				 * continue. All KSM pages belonging to the
 				 * stable_node dups in a stable_node chain
 				 * have the same content and they're
-				 * wrprotected at all times. Any will work
+				 * write protected at all times. Any will work
 				 * fine to continue the walk.
 				 */
 				tree_page = get_ksm_page(stable_node_any,
@@ -2001,7 +2001,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
 	 * duplicate. page_migration could break later if rmap breaks,
 	 * so we can as well crash here. We really need to check for
 	 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
-	 * for other negative values as an undeflow if detected here
+	 * for other negative values as an underflow if detected here
 	 * for the first time (and not when decreasing rmap_hlist_len)
 	 * would be sign of memory corruption in the stable_node.
 	 */
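
For readers less familiar with KSM: the write-protected pages these comments describe are the shared KSM pages that ksmd produces after userspace opts a memory range into merging with madvise(MADV_MERGEABLE). A minimal userspace sketch follows; the region size and fill pattern are arbitrary illustrations, not taken from this commit:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* arbitrary example size */

	/* KSM only merges anonymous memory, e.g. from mmap(). */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Identical page contents are what ksmd can merge. */
	memset(buf, 0x5a, len);

	/* Opt the range into KSM scanning. */
	if (madvise(buf, len, MADV_MERGEABLE)) {
		perror("madvise");
		return 1;
	}

	/*
	 * If ksmd is running (echo 1 > /sys/kernel/mm/ksm/run), these
	 * pages may be merged into a single write-protected KSM page;
	 * a later write to any of them triggers a copy-on-write break.
	 */
	getchar();
	return 0;
}

The zero_page path touched in replace_page() above is similarly opt-in: it is only reached when /sys/kernel/mm/ksm/use_zero_pages has been enabled, which is exactly why the comment notes that no extra check is needed at that point.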