Commit a4ea1864 authored by David Hildenbrand, committed by Andrew Morton

mm/rmap: add hugetlb sanity checks for anon rmap handling

Let's make sure we end up with the right folios in the right functions
when adding an anon rmap, just like we already do in the other rmap
functions.

Link: https://lkml.kernel.org/r/20231220224504.646757-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0c2ec32b
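
Note: VM_WARN_ON_FOLIO() only does anything on CONFIG_DEBUG_VM builds. As a rough sketch of the helper the checks below rely on (modeled on include/linux/mmdebug.h around this series; exact details may differ), it dumps the offending folio and fires a warning when the unexpected condition holds, and compiles down to a no-op otherwise:

#ifdef CONFIG_DEBUG_VM
/* Dump the folio and warn if the (unexpected) condition holds. */
#define VM_WARN_ON_FOLIO(cond, folio)	({				\
	int __ret_warn = !!(cond);					\
									\
	if (unlikely(__ret_warn)) {					\
		dump_page(&folio->page,					\
			  "VM_WARN_ON_FOLIO(" __stringify(cond)")");	\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn);						\
})
#else
/* !CONFIG_DEBUG_VM: type-check the condition, emit no code. */
#define VM_WARN_ON_FOLIO(cond, folio)	BUILD_BUG_ON_INVALID(cond)
#endif

With that, e.g. hugetlb_add_anon_rmap() being handed a non-hugetlb folio becomes a loud, folio-dumping warning on debug kernels rather than silent corruption later.
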
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1262,6 +1262,8 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	bool compound = flags & RMAP_COMPOUND;
 	bool first;
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
 	/* Is page being mapped by PTE? Is this its first map to be added? */
 	if (likely(!compound)) {
 		first = atomic_inc_and_test(&page->_mapcount);
@@ -1343,6 +1345,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 {
 	int nr = folio_nr_pages(folio);
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_BUG_ON_VMA(address < vma->vm_start ||
 			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
 	__folio_set_swapbacked(folio);
@@ -2634,6 +2637,7 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
 	atomic_inc(&folio->_entire_mapcount);
@@ -2646,6 +2650,8 @@ void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 void hugetlb_add_new_anon_rmap(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long address)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);