Commit d4b29fd7 authored by Dan Williams

block: remove block_device_operations ->direct_access()

Now that all the producers and consumers of dax interfaces have been
converted to using dax_operations on a dax_device, remove the block
device direct_access enabling.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 2093f2e9
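
For orientation, the callback removed here lived on struct block_device_operations and spoke in sectors and bytes; its replacement lives on struct dax_operations and speaks in page offsets and page counts, as the per-driver *_dax_direct_access() implementations in the diff below show. The following is a minimal sketch of that shape, not code from this commit: the "foo" device, its fields, and the surrounding details are illustrative assumptions.

/*
 * Illustrative sketch only (not part of this commit): what a driver's
 * dax_operations-based ->direct_access() looks like.  The "foo" device
 * and its fields are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/pfn.h>
#include <linux/pfn_t.h>

struct foo_device {
	struct dax_device *dax_dev;	/* set at registration time (see final sketch) */
	void *virt_addr;		/* direct-mapped backing memory */
	phys_addr_t phys_addr;
	size_t size;
};

static long foo_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct foo_device *foo = dax_get_private(dax_dev);
	resource_size_t offset = PFN_PHYS(pgoff);

	if (offset >= foo->size)
		return -ERANGE;
	*kaddr = foo->virt_addr + offset;
	*pfn = phys_to_pfn_t(foo->phys_addr + offset, PFN_DEV);

	/* Report how many pages are contiguously accessible from pgoff. */
	return min_t(long, nr_pages, PHYS_PFN(foo->size - offset));
}

static const struct dax_operations foo_dax_ops = {
	.direct_access = foo_dax_direct_access,
};

Each driver touched below (axon_ram, brd, dm, pmem, dcssblk) already carries a dax callback with this signature; the diff only deletes the sector/byte wrappers that adapted it to the block layer.
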
arch/powerpc/sysdev/axonram.c
@@ -139,6 +139,10 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
return BLK_QC_T_NONE;
}
static const struct block_device_operations axon_ram_devops = {
.owner = THIS_MODULE,
};
static long
__axon_ram_direct_access(struct axon_ram_bank *bank, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn)
@@ -150,25 +154,6 @@ __axon_ram_direct_access(struct axon_ram_bank *bank, pgoff_t pgoff, long nr_page
return (bank->size - offset) / PAGE_SIZE;
}
/**
* axon_ram_direct_access - direct_access() method for block device
* @device, @sector, @data: see block_device_operations method
*/
static long
axon_ram_blk_direct_access(struct block_device *device, sector_t sector,
void **kaddr, pfn_t *pfn, long size)
{
struct axon_ram_bank *bank = device->bd_disk->private_data;
return __axon_ram_direct_access(bank, (sector * 512) / PAGE_SIZE,
size / PAGE_SIZE, kaddr, pfn) * PAGE_SIZE;
}
static const struct block_device_operations axon_ram_devops = {
.owner = THIS_MODULE,
.direct_access = axon_ram_blk_direct_access
};
static long
axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn)
......
drivers/block/brd.c
@@ -395,18 +395,6 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
return 1;
}
static long brd_blk_direct_access(struct block_device *bdev, sector_t sector,
void **kaddr, pfn_t *pfn, long size)
{
struct brd_device *brd = bdev->bd_disk->private_data;
long nr_pages = __brd_direct_access(brd, PHYS_PFN(sector * 512),
PHYS_PFN(size), kaddr, pfn);
if (nr_pages < 0)
return nr_pages;
return nr_pages * PAGE_SIZE;
}
static long brd_dax_direct_access(struct dax_device *dax_dev,
pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
@@ -418,14 +406,11 @@ static long brd_dax_direct_access(struct dax_device *dax_dev,
static const struct dax_operations brd_dax_ops = {
.direct_access = brd_dax_direct_access,
};
#else
#define brd_blk_direct_access NULL
#endif
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
.direct_access = brd_blk_direct_access,
};
/*
......
drivers/md/dm.c
@@ -957,18 +957,6 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
void **kaddr, pfn_t *pfn, long size)
{
struct mapped_device *md = bdev->bd_disk->private_data;
struct dax_device *dax_dev = md->dax_dev;
long nr_pages = size / PAGE_SIZE;
nr_pages = dm_dax_direct_access(dax_dev, sector / PAGE_SECTORS,
nr_pages, kaddr, pfn);
return nr_pages < 0 ? nr_pages : nr_pages * PAGE_SIZE;
}
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH.
@@ -2823,7 +2811,6 @@ static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.direct_access = dm_blk_direct_access,
.getgeo = dm_blk_getgeo,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
......
drivers/nvdimm/pmem.c
@@ -220,19 +220,9 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
static long pmem_blk_direct_access(struct block_device *bdev, sector_t sector,
void **kaddr, pfn_t *pfn, long size)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
return __pmem_direct_access(pmem, PHYS_PFN(sector * 512),
PHYS_PFN(size), kaddr, pfn);
}
static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
.rw_page = pmem_rw_page,
.direct_access = pmem_blk_direct_access,
.revalidate_disk = nvdimm_revalidate_disk,
};
......
drivers/s390/block/dcssblk.c
@@ -31,8 +31,6 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
struct bio *bio);
static long dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
void **kaddr, pfn_t *pfn, long size);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
@@ -43,7 +41,6 @@ static const struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
.open = dcssblk_open,
.release = dcssblk_release,
.direct_access = dcssblk_blk_direct_access,
};
static const struct dax_operations dcssblk_dax_ops = {
@@ -915,19 +912,6 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
return (dev_sz - offset) / PAGE_SIZE;
}
static long
dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
void **kaddr, pfn_t *pfn, long size)
{
struct dcssblk_dev_info *dev_info;
dev_info = bdev->bd_disk->private_data;
if (!dev_info)
return -ENODEV;
return __dcssblk_direct_access(dev_info, PHYS_PFN(secnum * 512),
PHYS_PFN(size), kaddr, pfn) * PAGE_SIZE;
}
static long
dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn)
......
fs/block_dev.c
@@ -718,51 +718,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL_GPL(bdev_write_page);
/**
* bdev_direct_access() - Get the address for directly-accessible memory
* @bdev: The device containing the memory
* @dax: control and output parameters for ->direct_access
*
* If a block device is made up of directly addressable memory, this function
* will tell the caller the PFN and the address of the memory. The address
* may be directly dereferenced within the kernel without the need to call
* ioremap(), kmap() or similar. The PFN is suitable for inserting into
* page tables.
*
* Return: negative errno if an error occurs, otherwise the number of bytes
* accessible at this address.
*/
long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
{
sector_t sector = dax->sector;
long avail, size = dax->size;
const struct block_device_operations *ops = bdev->bd_disk->fops;
/*
* The device driver is allowed to sleep, in order to make the
* memory directly accessible.
*/
might_sleep();
if (size < 0)
return size;
if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
return -EOPNOTSUPP;
if ((sector + DIV_ROUND_UP(size, 512)) >
part_nr_sects_read(bdev->bd_part))
return -ERANGE;
sector += get_start_sect(bdev);
if (sector % (PAGE_SIZE / 512))
return -EINVAL;
avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
if (!avail)
return -ERANGE;
if (avail > 0 && avail & ~PAGE_MASK)
return -ENXIO;
return min(avail, size);
}
EXPORT_SYMBOL_GPL(bdev_direct_access);
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
pgoff_t *pgoff)
{
......
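
With bdev_direct_access() gone, a caller that holds a block_device and a sector/byte range reaches the same memory through the dax_device instead. A hedged sketch of that path follows: bdev_dax_pgoff() appears in this diff's context, while fs_dax_get_by_host(), dax_read_lock()/dax_read_unlock(), dax_direct_access() and put_dax() are assumptions about the dax core of the same kernel generation, not interfaces introduced by this commit.

/*
 * Sketch of the caller-side replacement for bdev_direct_access().
 * Assumes a page-aligned, in-range request; the helpers other than
 * bdev_dax_pgoff() are assumed from the contemporaneous dax core.
 */
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/pfn.h>
#include <linux/pfn_t.h>

static long example_direct_access(struct block_device *bdev, sector_t sector,
		size_t size, void **kaddr, pfn_t *pfn)
{
	struct dax_device *dax_dev;
	pgoff_t pgoff;
	long avail;
	int id, rc;

	/* Map the partition-relative sector/size onto a whole-device pgoff. */
	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EOPNOTSUPP;

	id = dax_read_lock();
	avail = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), kaddr, pfn);
	dax_read_unlock(id);
	put_dax(dax_dev);

	/* Positive return values are now counted in pages, not bytes. */
	return avail;
}

The unit change is the visible difference: where the removed helper returned min(avail, size) in bytes, dax_direct_access() deals purely in pages, which is why the byte scaling done by each removed *_blk_direct_access() wrapper disappears along with it.
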
include/linux/blkdev.h
@@ -1916,28 +1916,12 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
/**
* struct blk_dax_ctl - control and output parameters for ->direct_access
* @sector: (input) offset relative to a block_device
* @addr: (output) kernel virtual address for @sector populated by driver
* @pfn: (output) page frame number for @addr populated by driver
* @size: (input) number of bytes requested
*/
struct blk_dax_ctl {
sector_t sector;
void *addr;
long size;
pfn_t pfn;
};
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1956,7 +1940,6 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#else /* CONFIG_BLOCK */
......
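
On the registration side, a driver no longer advertises direct access by filling in .direct_access in its block_device_operations; it obtains a dax_device bound to its gendisk name instead. The sketch below reuses the hypothetical foo_device and foo_dax_ops from the first sketch; alloc_dax(), kill_dax() and put_dax() are assumptions about the dax core of this era, not part of the diff above.

/*
 * Sketch of the driver-side hookup that replaces the removed
 * block_device_operations ->direct_access member.  Reuses the
 * hypothetical foo_device/foo_dax_ops from the first sketch;
 * alloc_dax()/kill_dax()/put_dax() are assumed, not shown above.
 */
#include <linux/dax.h>
#include <linux/genhd.h>

static int foo_attach_dax(struct foo_device *foo, struct gendisk *disk)
{
	/* Bind foo_dax_ops to the disk name so fs-dax can look it up. */
	foo->dax_dev = alloc_dax(foo, disk->disk_name, &foo_dax_ops);
	if (!foo->dax_dev)
		return -ENOMEM;
	return 0;
}

static void foo_detach_dax(struct foo_device *foo)
{
	kill_dax(foo->dax_dev);	/* stop new dax_direct_access() callers */
	put_dax(foo->dax_dev);	/* drop the driver's reference */
}

This mirrors what the converted drivers in this series already do, which is what lets their block_device_operations tables shrink to open/release/ioctl/rw_page and friends in the hunks above.
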