Commit 8d0920bd authored by David Hildenbrand's avatar David Hildenbrand

mm: remove VM_DENYWRITE

All in-tree users of VM_DENYWRITE are gone, and MAP_DENYWRITE can no
longer be set from user space; let's remove VM_DENYWRITE.
Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
parent 4589ff7c
...@@ -619,7 +619,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) ...@@ -619,7 +619,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
[ilog2(VM_MAYSHARE)] = "ms", [ilog2(VM_MAYSHARE)] = "ms",
[ilog2(VM_GROWSDOWN)] = "gd", [ilog2(VM_GROWSDOWN)] = "gd",
[ilog2(VM_PFNMAP)] = "pf", [ilog2(VM_PFNMAP)] = "pf",
[ilog2(VM_DENYWRITE)] = "dw",
[ilog2(VM_LOCKED)] = "lo", [ilog2(VM_LOCKED)] = "lo",
[ilog2(VM_IO)] = "io", [ilog2(VM_IO)] = "io",
[ilog2(VM_SEQ_READ)] = "sr", [ilog2(VM_SEQ_READ)] = "sr",
......
...@@ -281,7 +281,6 @@ extern unsigned int kobjsize(const void *objp); ...@@ -281,7 +281,6 @@ extern unsigned int kobjsize(const void *objp);
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_GROWSDOWN 0x00000100 /* general info on the segment */
#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000 #define VM_LOCKED 0x00002000
......
...@@ -153,7 +153,6 @@ static inline unsigned long ...@@ -153,7 +153,6 @@ static inline unsigned long
calc_vm_flag_bits(unsigned long flags) calc_vm_flag_bits(unsigned long flags)
{ {
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
arch_calc_vm_flag_bits(flags); arch_calc_vm_flag_bits(flags);
......
...@@ -165,7 +165,6 @@ IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison") ...@@ -165,7 +165,6 @@ IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
{VM_UFFD_MISSING, "uffd_missing" }, \ {VM_UFFD_MISSING, "uffd_missing" }, \
IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \
{VM_PFNMAP, "pfnmap" }, \ {VM_PFNMAP, "pfnmap" }, \
{VM_DENYWRITE, "denywrite" }, \
{VM_UFFD_WP, "uffd_wp" }, \ {VM_UFFD_WP, "uffd_wp" }, \
{VM_LOCKED, "locked" }, \ {VM_LOCKED, "locked" }, \
{VM_IO, "io" }, \ {VM_IO, "io" }, \
......
...@@ -8307,8 +8307,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) ...@@ -8307,8 +8307,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
else else
flags = MAP_PRIVATE; flags = MAP_PRIVATE;
if (vma->vm_flags & VM_DENYWRITE)
flags |= MAP_DENYWRITE;
if (vma->vm_flags & VM_LOCKED) if (vma->vm_flags & VM_LOCKED)
flags |= MAP_LOCKED; flags |= MAP_LOCKED;
if (is_vm_hugetlb_page(vma)) if (is_vm_hugetlb_page(vma))
......
...@@ -570,12 +570,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, ...@@ -570,12 +570,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
file = tmp->vm_file; file = tmp->vm_file;
if (file) { if (file) {
struct inode *inode = file_inode(file);
struct address_space *mapping = file->f_mapping; struct address_space *mapping = file->f_mapping;
get_file(file); get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
put_write_access(inode);
i_mmap_lock_write(mapping); i_mmap_lock_write(mapping);
if (tmp->vm_flags & VM_SHARED) if (tmp->vm_flags & VM_SHARED)
mapping_allow_writable(mapping); mapping_allow_writable(mapping);
......
...@@ -675,9 +675,8 @@ flags(void) ...@@ -675,9 +675,8 @@ flags(void)
"uptodate|dirty|lru|active|swapbacked", "uptodate|dirty|lru|active|swapbacked",
cmp_buffer); cmp_buffer);
flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
| VM_DENYWRITE; test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags);
test("read|exec|mayread|maywrite|mayexec|denywrite", "%pGv", &flags);
gfp = GFP_TRANSHUGE; gfp = GFP_TRANSHUGE;
test("GFP_TRANSHUGE", "%pGg", &gfp); test("GFP_TRANSHUGE", "%pGg", &gfp);
......
...@@ -148,8 +148,6 @@ void vma_set_page_prot(struct vm_area_struct *vma) ...@@ -148,8 +148,6 @@ void vma_set_page_prot(struct vm_area_struct *vma)
static void __remove_shared_vm_struct(struct vm_area_struct *vma, static void __remove_shared_vm_struct(struct vm_area_struct *vma,
struct file *file, struct address_space *mapping) struct file *file, struct address_space *mapping)
{ {
if (vma->vm_flags & VM_DENYWRITE)
allow_write_access(file);
if (vma->vm_flags & VM_SHARED) if (vma->vm_flags & VM_SHARED)
mapping_unmap_writable(mapping); mapping_unmap_writable(mapping);
...@@ -666,8 +664,6 @@ static void __vma_link_file(struct vm_area_struct *vma) ...@@ -666,8 +664,6 @@ static void __vma_link_file(struct vm_area_struct *vma)
if (file) { if (file) {
struct address_space *mapping = file->f_mapping; struct address_space *mapping = file->f_mapping;
if (vma->vm_flags & VM_DENYWRITE)
put_write_access(file_inode(file));
if (vma->vm_flags & VM_SHARED) if (vma->vm_flags & VM_SHARED)
mapping_allow_writable(mapping); mapping_allow_writable(mapping);
...@@ -1788,22 +1784,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr, ...@@ -1788,22 +1784,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma->vm_pgoff = pgoff; vma->vm_pgoff = pgoff;
if (file) { if (file) {
if (vm_flags & VM_DENYWRITE) {
error = deny_write_access(file);
if (error)
goto free_vma;
}
if (vm_flags & VM_SHARED) { if (vm_flags & VM_SHARED) {
error = mapping_map_writable(file->f_mapping); error = mapping_map_writable(file->f_mapping);
if (error) if (error)
goto allow_write_and_free_vma; goto free_vma;
} }
/* ->mmap() can change vma->vm_file, but must guarantee that
* vma_link() below can deny write-access if VM_DENYWRITE is set
* and map writably if VM_SHARED is set. This usually means the
* new file must not have been exposed to user-space, yet.
*/
vma->vm_file = get_file(file); vma->vm_file = get_file(file);
error = call_mmap(file, vma); error = call_mmap(file, vma);
if (error) if (error)
...@@ -1860,13 +1846,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr, ...@@ -1860,13 +1846,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma_link(mm, vma, prev, rb_link, rb_parent); vma_link(mm, vma, prev, rb_link, rb_parent);
/* Once vma denies write, undo our temporary denial count */ /* Once vma denies write, undo our temporary denial count */
if (file) {
unmap_writable: unmap_writable:
if (vm_flags & VM_SHARED) if (file && vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping); mapping_unmap_writable(file->f_mapping);
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
}
file = vma->vm_file; file = vma->vm_file;
out: out:
perf_event_mmap(vma); perf_event_mmap(vma);
...@@ -1906,9 +1888,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, ...@@ -1906,9 +1888,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
charged = 0; charged = 0;
if (vm_flags & VM_SHARED) if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping); mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma: free_vma:
vm_area_free(vma); vm_area_free(vma);
unacct_error: unacct_error:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment