Commit 5e161e40 authored by Jan Kara's avatar Jan Kara Committed by Dan Williams

dax: Factor out getting of pfn out of iomap

Factor out code to get pfn out of iomap that is shared between PTE and
PMD fault path.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 31a6f1a6
...@@ -825,30 +825,53 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) ...@@ -825,30 +825,53 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
} }
static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap, static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
loff_t pos, void *entry) pfn_t *pfnp)
{ {
const sector_t sector = dax_iomap_sector(iomap, pos); const sector_t sector = dax_iomap_sector(iomap, pos);
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
unsigned long vaddr = vmf->address;
void *ret, *kaddr;
pgoff_t pgoff; pgoff_t pgoff;
void *kaddr;
int id, rc; int id, rc;
pfn_t pfn; long length;
rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff); rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
if (rc) if (rc)
return rc; return rc;
id = dax_read_lock(); id = dax_read_lock();
rc = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
&kaddr, &pfn); &kaddr, pfnp);
if (rc < 0) { if (length < 0) {
dax_read_unlock(id); rc = length;
return rc; goto out;
} }
rc = -EINVAL;
if (PFN_PHYS(length) < size)
goto out;
if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
goto out;
/* For larger pages we need devmap */
if (length > 1 && !pfn_t_devmap(*pfnp))
goto out;
rc = 0;
out:
dax_read_unlock(id); dax_read_unlock(id);
return rc;
}
static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
loff_t pos, void *entry)
{
const sector_t sector = dax_iomap_sector(iomap, pos);
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping;
unsigned long vaddr = vmf->address;
void *ret;
int rc;
pfn_t pfn;
rc = dax_iomap_pfn(iomap, pos, PAGE_SIZE, &pfn);
if (rc < 0)
return rc;
ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0); ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
if (IS_ERR(ret)) if (IS_ERR(ret))
...@@ -1223,46 +1246,26 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap, ...@@ -1223,46 +1246,26 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
{ {
struct address_space *mapping = vmf->vma->vm_file->f_mapping; struct address_space *mapping = vmf->vma->vm_file->f_mapping;
const sector_t sector = dax_iomap_sector(iomap, pos); const sector_t sector = dax_iomap_sector(iomap, pos);
struct dax_device *dax_dev = iomap->dax_dev;
struct block_device *bdev = iomap->bdev;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
const size_t size = PMD_SIZE; void *ret = NULL;
void *ret = NULL, *kaddr;
long length = 0;
pgoff_t pgoff;
pfn_t pfn = {}; pfn_t pfn = {};
int id; int rc;
if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0) rc = dax_iomap_pfn(iomap, pos, PMD_SIZE, &pfn);
if (rc < 0)
goto fallback; goto fallback;
id = dax_read_lock();
length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
if (length < 0)
goto unlock_fallback;
length = PFN_PHYS(length);
if (length < size)
goto unlock_fallback;
if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
goto unlock_fallback;
if (!pfn_t_devmap(pfn))
goto unlock_fallback;
dax_read_unlock(id);
ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
RADIX_DAX_PMD); RADIX_DAX_PMD);
if (IS_ERR(ret)) if (IS_ERR(ret))
goto fallback; goto fallback;
trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret); trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, ret);
return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
pfn, vmf->flags & FAULT_FLAG_WRITE); pfn, vmf->flags & FAULT_FLAG_WRITE);
unlock_fallback:
dax_read_unlock(id);
fallback: fallback:
trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret); trace_dax_pmd_insert_mapping_fallback(inode, vmf, PMD_SIZE, pfn, ret);
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment