Commit b1fb185f authored by Konstantin Khlebnikov, committed by Ben Hutchings

pagemap: hide physical addresses from non-privileged users

commit 1c90308e upstream.

This patch makes pagemap readable for normal users and hides physical
addresses from them.  For some use-cases PFN isn't required at all.

See http://lkml.kernel.org/r/1425935472-17949-1-git-send-email-kirill@shutemov.name
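
For context, a minimal user-space sketch (not part of the patch) of how a pagemap entry is typically read and decoded; it assumes the documented layout of the time (bit 63 = present, bit 62 = swapped, bits 0-54 = page frame number). After this change the open() and the status bits work for any user, but the PFN field reads back as zero without CAP_SYS_ADMIN.

/*
 * Hypothetical demo, not part of the patch: look up the pagemap entry
 * for one mapped page and decode it.  Assumes the documented layout:
 * bit 63 = present, bit 62 = swapped, bits 0-54 = page frame number.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *buf = malloc(psz);
	uint64_t entry;
	int fd;

	buf[0] = 1;	/* touch the page so it is present */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {	/* before this patch, non-root got EPERM here */
		perror("open");
		return 1;
	}

	if (pread(fd, &entry, sizeof(entry),
		  ((uintptr_t)buf / psz) * sizeof(entry)) != sizeof(entry)) {
		perror("pread");
		return 1;
	}

	/* Without CAP_SYS_ADMIN the pfn field now reads as 0. */
	printf("present=%d swapped=%d pfn=%llu\n",
	       (int)((entry >> 63) & 1), (int)((entry >> 62) & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}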

Fixes: ab676b7d ("pagemap: do not leak physical addresses to non-privileged userspace")
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Mark Williamson <mwilliamson@undo-software.com>
Tested-by: Mark Williamson <mwilliamson@undo-software.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 3.2:
 - Add the same check in the places where we look up a PFN
 - Add struct pagemapread * parameters where necessary
 - Open-code file_ns_capable()
 - Delete pagemap_open() entirely, as it would always return 0]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 9d3eb706
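
The backport note above mentions open-coding file_ns_capable(), which does not exist in 3.2. As a rough reference (not 3.2 code, and using the post-3.3 security_capable() argument order), the upstream helper amounts to a capability check against the credentials the file was opened with, roughly:

/*
 * Approximate shape of the newer file_ns_capable() helper that the
 * backport open-codes; 3.2's security_capable() instead takes
 * (ns, cred, cap), which is why the hunk below passes &init_user_ns first.
 */
bool file_ns_capable(const struct file *file, struct user_namespace *ns,
		     int cap)
{
	if (WARN_ON_ONCE(!cap_valid(cap)))
		return false;

	/* f_cred is the opener's credentials, captured at open() time */
	return security_capable(file->f_cred, ns, cap) == 0;
}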
fs/proc/task_mmu.c

@@ -11,6 +11,7 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/security.h>
 
 #include <asm/elf.h>
 #include <asm/uaccess.h>
@@ -606,6 +607,7 @@ const struct file_operations proc_clear_refs_operations = {
 struct pagemapread {
 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
 	u64 *buffer;
+	bool show_pfn;
 };
 
 #define PM_ENTRY_BYTES      sizeof(u64)
@@ -654,14 +656,14 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
-static u64 pte_to_pagemap_entry(pte_t pte)
+static u64 pte_to_pagemap_entry(struct pagemapread *pm, pte_t pte)
 {
 	u64 pme = 0;
 	if (is_swap_pte(pte))
 		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
 			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
 	else if (pte_present(pte))
-		pme = PM_PFRAME(pte_pfn(pte))
+		pme = (pm->show_pfn ? PM_PFRAME(pte_pfn(pte)) : 0)
 			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
 	return pme;
 }
@@ -693,7 +695,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		if (vma && (vma->vm_start <= addr) &&
 		    !is_vm_hugetlb_page(vma)) {
 			pte = pte_offset_map(pmd, addr);
-			pfn = pte_to_pagemap_entry(*pte);
+			pfn = pte_to_pagemap_entry(pm, *pte);
 			/* unmap before userspace copy */
 			pte_unmap(pte);
 		}
@@ -708,11 +710,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+static u64 huge_pte_to_pagemap_entry(struct pagemapread *pm, pte_t pte, int offset)
 {
 	u64 pme = 0;
 	if (pte_present(pte))
-		pme = PM_PFRAME(pte_pfn(pte) + offset)
+		pme = (pm->show_pfn ? PM_PFRAME(pte_pfn(pte) + offset) : 0)
 			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
 	return pme;
 }
@@ -728,7 +730,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 	for (; addr != end; addr += PAGE_SIZE) {
 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
-		pfn = huge_pte_to_pagemap_entry(*pte, offset);
+		pfn = huge_pte_to_pagemap_entry(pm, *pte, offset);
 		err = add_to_pagemap(addr, pfn, pm);
 		if (err)
 			return err;
 	}
@@ -792,6 +794,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!count)
 		goto out_task;
 
+	/* do not disclose physical addresses: attack vector */
+	pm.show_pfn = !security_capable(&init_user_ns, file->f_cred,
+					CAP_SYS_ADMIN);
+
 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
 	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
 	ret = -ENOMEM;
@@ -864,19 +870,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	return ret;
 }
 
-static int pagemap_open(struct inode *inode, struct file *file)
-{
-	/* do not disclose physical addresses to unprivileged
-	   userspace (closes a rowhammer attack vector) */
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	return 0;
-}
-
 const struct file_operations proc_pagemap_operations = {
 	.llseek		= mem_lseek, /* borrow this */
 	.read		= pagemap_read,
-	.open		= pagemap_open,
 };
 
 #endif /* CONFIG_PROC_PAGE_MONITOR */
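
One property of the check added in pagemap_read() above is worth noting: show_pfn is derived from file->f_cred, the credentials captured at open() time, not from the reader's current credentials. A hypothetical sketch of the consequence (assumes it is run as root, and uses uid 65534 as an arbitrary unprivileged uid):

/*
 * Hypothetical sketch: a descriptor opened while CAP_SYS_ADMIN was held
 * keeps reporting PFNs even after privileges are dropped, because the
 * check is made against file->f_cred rather than current credentials.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static char probe;			/* a page we know is mapped */

int main(void)
{
	uint64_t entry;
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);	/* opened privileged */

	if (fd < 0 || setuid(65534) != 0)	/* drop to an unprivileged uid */
		return 1;

	probe = 1;				/* make the page resident */
	if (pread(fd, &entry, sizeof(entry),
		  ((uintptr_t)&probe / psz) * sizeof(entry)) != sizeof(entry))
		return 1;

	/* PFN field (bits 0-54) is still populated for this descriptor. */
	printf("pfn=%llu\n", (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}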