Commit 30bac164 authored by Linus Torvalds

Revert "Change mincore() to count "mapped" pages rather than "cached" pages"

This reverts commit 574823bf.

It turns out that my hope that we could just remove the code that
exposes the cache residency status from mincore() was too optimistic.

There are various random users that want it, and one example would be
the Netflix database cluster maintenance. To quote Josh Snyder:

 "For Netflix, losing accurate information from the mincore syscall
  would lengthen database cluster maintenance operations from days to
  months. We rely on cross-process mincore to migrate the contents of a
  page cache from machine to machine, and across reboots.

  To do this, I wrote and maintain happycache [1], a page cache
  dumper/loader tool. It is quite similar in architecture to pgfincore,
  except that it is agnostic to workload. The gist of happycache's
  operation is "produce a dump of residence status for each page, do
  some operation, then reload exactly the same pages which were present
  before." happycache is entirely dependent on accurate reporting of the
  in-core status of file-backed pages, as accessed by another process.

  We primarily use happycache with Cassandra, which (like Postgres +
  pgfincore) relies heavily on OS page cache to reduce disk accesses.
  Because our workloads never experience a cold page cache, we are able
  to provision hardware for a peak utilization level that is far lower
  than the hypothetical "every query is a cache miss" peak.

  A database warmed by happycache can be ready for service in seconds
  (bounded only by the performance of the drives and the I/O subsystem),
  with no period of in-service degradation. By contrast, putting a
  database in service without a page cache entails a potentially
  unbounded period of degradation (at Netflix, the time to populate a
  single node's cache via natural cache misses varies by workload from
  hours to weeks). If a single node upgrade were to take weeks, then
  upgrading an entire cluster would take months. Since we want to apply
  security upgrades (and other things) on a somewhat tighter schedule,
  we would have to develop more complex solutions to provide the same
  functionality already provided by mincore.

  At the bottom line, happycache is designed to benignly exploit the
  same information leak documented in the paper [2]. I think it makes
  perfect sense to remove cross-process mincore functionality from
  unprivileged users, but not to remove it entirely"
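
For concreteness, the dump/reload cycle Josh describes can be sketched in a
few lines of userspace C.  This is only an illustration of the mechanism, not
happycache itself (which has its own on-disk format and error handling):
mincore() fills in one status byte per page of a mapped file, with bit 0 set
for resident pages, and posix_fadvise(POSIX_FADV_WILLNEED) can later be used
to ask the kernel to pull exactly those pages back into the cache.

/*
 * Illustrative sketch only (not happycache): record which pages of a file
 * are currently resident in the page cache, then request readahead for
 * exactly those pages.  Error handling is kept minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY);
	struct stat st;
	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	long psize = sysconf(_SC_PAGESIZE);
	size_t pages = (st.st_size + psize - 1) / psize;

	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	unsigned char *vec = malloc(pages);
	if (map == MAP_FAILED || !vec)
		return 1;

	/* "Dump": one status byte per page, bit 0 set if resident. */
	if (mincore(map, st.st_size, vec) != 0) {
		perror("mincore");
		return 1;
	}

	/* "Reload": ask for readahead only of the pages that were resident. */
	for (size_t i = 0; i < pages; i++)
		if (vec[i] & 1)
			posix_fadvise(fd, (off_t)i * psize, psize,
				      POSIX_FADV_WILLNEED);

	free(vec);
	munmap(map, st.st_size);
	close(fd);
	return 0;
}

A real tool additionally has to persist the dump across a reboot or migration
and cope with files whose size changed in between, but the residency data
itself comes entirely from mincore(), which is why its per-page accuracy
matters here.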

We do have an alternate approach that limits the cache residency
reporting only to processes that have write permissions to the file, so
we can fix the original information leak issue that way.  It involves
_adding_ code rather than removing it, which is sad, but hey, at least
we haven't found any users that would find the restrictions
unacceptable.

So revert the optimistic first approach to make room for that alternate
fix instead.
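
As a rough sketch of the shape such a fix could take (the helper name and its
exact placement are assumptions here, not the eventual upstream patch): a
per-VMA check that only reveals page cache residency to callers that own the
file or could open it for writing, while everyone else merely learns whether
the pages are mapped.

/*
 * Sketch only: gate cache-residency reporting on write permission.
 * Helper name and details are illustrative, not a quote of the final fix.
 */
static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return true;
	/*
	 * Reveal page cache residency only for files the caller owns or
	 * could open for writing; everyone else just learns whether the
	 * pages are mapped, which leaks nothing new about other users.
	 */
	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
	       inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
}

That would still be enough for the happycache-style cross-process warm-up,
which can be run with write access to its own data files, while closing the
side channel for unrelated read-only observers.
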
Reported-by: Josh Snyder <joshs@netflix.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Dominique Martinet <asmadeus@codewreck.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Kevin Easton <kevin@guarana.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Cyril Hrubis <chrubis@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daniel Gruss <daniel@gruss.cc>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent db781446
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -42,14 +42,72 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
 	return 0;
 }
 
-static int mincore_unmapped_range(unsigned long addr, unsigned long end,
-				   struct mm_walk *walk)
+/*
+ * Later we can get more picky about what "in core" means precisely.
+ * For now, simply check to see if the page is in the page cache,
+ * and is up to date; i.e. that no page-in operation would be required
+ * at this time if an application were to map and access this page.
+ */
+static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
+{
+	unsigned char present = 0;
+	struct page *page;
+
+	/*
+	 * When tmpfs swaps out a page from a file, any process mapping that
+	 * file will not get a swp_entry_t in its pte, but rather it is like
+	 * any other file mapping (ie. marked !present and faulted in with
+	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
+	 */
+#ifdef CONFIG_SWAP
+	if (shmem_mapping(mapping)) {
+		page = find_get_entry(mapping, pgoff);
+		/*
+		 * shmem/tmpfs may return swap: account for swapcache
+		 * page too.
+		 */
+		if (xa_is_value(page)) {
+			swp_entry_t swp = radix_to_swp_entry(page);
+			page = find_get_page(swap_address_space(swp),
+					     swp_offset(swp));
+		}
+	} else
+		page = find_get_page(mapping, pgoff);
+#else
+	page = find_get_page(mapping, pgoff);
+#endif
+	if (page) {
+		present = PageUptodate(page);
+		put_page(page);
+	}
+
+	return present;
+}
+
+static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
+				struct vm_area_struct *vma, unsigned char *vec)
 {
-	unsigned char *vec = walk->private;
 	unsigned long nr = (end - addr) >> PAGE_SHIFT;
+	int i;
 
-	memset(vec, 0, nr);
-	walk->private += nr;
+	if (vma->vm_file) {
+		pgoff_t pgoff;
+
+		pgoff = linear_page_index(vma, addr);
+		for (i = 0; i < nr; i++, pgoff++)
+			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+	} else {
+		for (i = 0; i < nr; i++)
+			vec[i] = 0;
+	}
+	return nr;
+}
+
+static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+				   struct mm_walk *walk)
+{
+	walk->private += __mincore_unmapped_range(addr, end,
+						  walk->vma, walk->private);
 	return 0;
 }
 
@@ -69,9 +127,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		goto out;
 	}
 
-	/* We'll consider a THP page under construction to be there */
 	if (pmd_trans_unstable(pmd)) {
-		memset(vec, 1, nr);
+		__mincore_unmapped_range(addr, end, vma, vec);
 		goto out;
 	}
 
@@ -80,17 +137,28 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		pte_t pte = *ptep;
 
 		if (pte_none(pte))
-			*vec = 0;
+			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
+						 vma, vec);
 		else if (pte_present(pte))
 			*vec = 1;
 		else { /* pte is a swap entry */
 			swp_entry_t entry = pte_to_swp_entry(pte);
 
-			/*
-			 * migration or hwpoison entries are always
-			 * uptodate
-			 */
-			*vec = !!non_swap_entry(entry);
+			if (non_swap_entry(entry)) {
+				/*
+				 * migration or hwpoison entries are always
+				 * uptodate
+				 */
+				*vec = 1;
+			} else {
+#ifdef CONFIG_SWAP
+				*vec = mincore_page(swap_address_space(entry),
+						    swp_offset(entry));
+#else
+				WARN_ON(1);
+				*vec = 1;
+#endif
+			}
 		}
 		vec++;
 	}