Commit a41fe02b authored by Dan Williams

Revert "block: use DAX for partition table reads"

commit d1a5f2b4 ("block: use DAX for partition table reads") was
part of a stalled effort to allow dax mappings of block devices. Since
then the device-dax mechanism has filled the role of dax-mapping static
device ranges.

Now that we are moving ->direct_access() from a block_device operation
to a dax_inode operation we would need block devices to map and carry
their own dax_inode reference.
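
Roughly, the move in question has the following shape. This is a sketch only: the block_device_operations signature reflects the kernel around the time of this series, while the dax-side struct and member names below (dax_operations / dax_device) are illustrative stand-ins for what the commit text calls "dax_inode", not a final API.

/* Sketch, kernel context assumed -- not part of this patch. */

/* Today: ->direct_access() hangs off the block device and is keyed by sector. */
struct block_device_operations {
	long (*direct_access)(struct block_device *bdev, sector_t sector,
			      void **kaddr, pfn_t *pfn, long size);
	/* ... */
};

/*
 * Direction of the series: ->direct_access() hangs off a separate dax object
 * keyed by page offset, so a block device that wanted dax mappings would have
 * to look up and hold its own reference to that object.
 */
struct dax_operations {
	long (*direct_access)(struct dax_device *dax_dev, pgoff_t pgoff,
			      long nr_pages, void **kaddr, pfn_t *pfn);
	/* ... */
};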

Unless / until we decide to revive dax mapping of raw block devices
through the dax_inode scheme, there is no need to carry
read_dax_sector(). Its removal in turn allows for the removal of
bdev_direct_access() and should have been included in commit
22375701 ("block_dev: remove DAX leftovers").

Cc: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent fa5d932c
block/partition-generic.c

@@ -16,7 +16,6 @@
 #include <linux/kmod.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
-#include <linux/dax.h>
 #include <linux/blktrace_api.h>
 
 #include "partitions/check.h"
@@ -631,24 +630,12 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
 	return 0;
 }
 
-static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
-{
-	struct address_space *mapping = bdev->bd_inode->i_mapping;
-
-	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
-				 NULL);
-}
-
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 {
+	struct address_space *mapping = bdev->bd_inode->i_mapping;
 	struct page *page;
 
-	/* don't populate page cache for dax capable devices */
-	if (IS_DAX(bdev->bd_inode))
-		page = read_dax_sector(bdev, n);
-	else
-		page = read_pagecache_sector(bdev, n);
-
+	page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL);
 	if (!IS_ERR(page)) {
 		if (PageError(page))
 			goto fail;
...
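
As an aside on the arithmetic the restored page-cache path relies on: read_mapping_page() is indexed by page, so the 512-byte sector number n is converted with n >> (PAGE_SHIFT - 9). A minimal user-space illustration, assuming 4 KiB pages (PAGE_SHIFT = 12); the helper names are made up for this example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12	/* assumption: 4 KiB pages */
#define SECTOR_SHIFT 9	/* 512-byte sectors */

/* Page-cache index that holds sector n -- the n >> (PAGE_SHIFT-9) above. */
static uint64_t sector_to_pgoff(uint64_t n)
{
	return n >> (PAGE_SHIFT - SECTOR_SHIFT);
}

/* Byte offset of sector n inside that page. */
static unsigned int sector_offset_in_page(uint64_t n)
{
	return (n & ((1u << (PAGE_SHIFT - SECTOR_SHIFT)) - 1)) << SECTOR_SHIFT;
}

int main(void)
{
	/* Sector 17 lands in page index 2, at byte offset 512 within it. */
	printf("pgoff=%llu off=%u\n",
	       (unsigned long long)sector_to_pgoff(17), sector_offset_in_page(17));
	return 0;
}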
fs/dax.c

@@ -101,26 +101,6 @@ static int dax_is_empty_entry(void *entry)
 	return (unsigned long)entry & RADIX_DAX_EMPTY;
 }
 
-struct page *read_dax_sector(struct block_device *bdev, sector_t n)
-{
-	struct page *page = alloc_pages(GFP_KERNEL, 0);
-	struct blk_dax_ctl dax = {
-		.size = PAGE_SIZE,
-		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
-	};
-	long rc;
-
-	if (!page)
-		return ERR_PTR(-ENOMEM);
-
-	rc = dax_map_atomic(bdev, &dax);
-	if (rc < 0)
-		return ERR_PTR(rc);
-	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
-	dax_unmap_atomic(bdev, &dax);
-	return page;
-}
-
 /*
  * DAX radix tree locking
  */
...
include/linux/dax.h

@@ -70,15 +70,9 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 		pgoff_t index, void *entry, bool wake_all);
 
 #ifdef CONFIG_FS_DAX
-struct page *read_dax_sector(struct block_device *bdev, sector_t n);
 int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 		unsigned int offset, unsigned int length);
 #else
-static inline struct page *read_dax_sector(struct block_device *bdev,
-		sector_t n)
-{
-	return ERR_PTR(-ENXIO);
-}
 static inline int __dax_zero_page_range(struct block_device *bdev,
 		sector_t sector, unsigned int offset, unsigned int length)
 {
...