Commit 4d9a2c87 authored by Jan Kara, committed by Ross Zwisler

dax: Remove i_mmap_lock protection

Currently faults are protected against truncate by filesystem specific
i_mmap_sem and page lock in case of hole page. CoW faults are protected
by DAX radix tree entry locking. So there's no need for i_mmap_lock in DAX
code. Remove it.
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
parent bc2466e4
...@@ -798,29 +798,19 @@ static int dax_insert_mapping(struct address_space *mapping, ...@@ -798,29 +798,19 @@ static int dax_insert_mapping(struct address_space *mapping,
.sector = to_sector(bh, mapping->host), .sector = to_sector(bh, mapping->host),
.size = bh->b_size, .size = bh->b_size,
}; };
int error;
void *ret; void *ret;
void *entry = *entryp; void *entry = *entryp;
i_mmap_lock_read(mapping); if (dax_map_atomic(bdev, &dax) < 0)
return PTR_ERR(dax.addr);
if (dax_map_atomic(bdev, &dax) < 0) {
error = PTR_ERR(dax.addr);
goto out;
}
dax_unmap_atomic(bdev, &dax); dax_unmap_atomic(bdev, &dax);
ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector); ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
if (IS_ERR(ret)) { if (IS_ERR(ret))
error = PTR_ERR(ret); return PTR_ERR(ret);
goto out;
}
*entryp = ret; *entryp = ret;
error = vm_insert_mixed(vma, vaddr, dax.pfn); return vm_insert_mixed(vma, vaddr, dax.pfn);
out:
i_mmap_unlock_read(mapping);
return error;
} }
/** /**
...@@ -1058,8 +1048,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, ...@@ -1058,8 +1048,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
truncate_pagecache_range(inode, lstart, lend); truncate_pagecache_range(inode, lstart, lend);
} }
i_mmap_lock_read(mapping);
if (!write && !buffer_mapped(&bh)) { if (!write && !buffer_mapped(&bh)) {
spinlock_t *ptl; spinlock_t *ptl;
pmd_t entry; pmd_t entry;
...@@ -1148,8 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, ...@@ -1148,8 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
} }
out: out:
i_mmap_unlock_read(mapping);
return result; return result;
fallback: fallback:
......
...@@ -2453,8 +2453,6 @@ void unmap_mapping_range(struct address_space *mapping, ...@@ -2453,8 +2453,6 @@ void unmap_mapping_range(struct address_space *mapping,
if (details.last_index < details.first_index) if (details.last_index < details.first_index)
details.last_index = ULONG_MAX; details.last_index = ULONG_MAX;
/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
i_mmap_lock_write(mapping); i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details); unmap_mapping_range_tree(&mapping->i_mmap, &details);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment