Commit bd697a80 authored by Vishal Verma's avatar Vishal Verma Committed by Dan Williams

pmem: reduce kmap_atomic sections to the memcpys only

pmem_do_bvec used to kmap_atomic at the beginning, and only unmap at the
end. Things like nvdimm_clear_poison may want to do nvdimm subsystem
bookkeeping operations that may involve taking locks or doing memory
allocations, and we can't do that from the atomic context. Reduce the
atomic context to just what needs it - the memcpy to/from pmem.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: default avatarVishal Verma <vishal.l.verma@intel.com>
Signed-off-by: default avatarDan Williams <dan.j.williams@intel.com>
parent 9ffd6350
...@@ -66,13 +66,32 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, ...@@ -66,13 +66,32 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
invalidate_pmem(pmem->virt_addr + offset, len); invalidate_pmem(pmem->virt_addr + offset, len);
} }
/*
 * Copy @len bytes from @page (starting at byte offset @off) into
 * persistent memory at @pmem_addr.  The kmap_atomic section is kept
 * as small as possible: only the memcpy itself runs in atomic context.
 */
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *kaddr = kmap_atomic(page);

	memcpy_to_pmem(pmem_addr, kaddr + off, len);
	kunmap_atomic(kaddr);
}
/*
 * Copy @len bytes from persistent memory at @pmem_addr into @page at
 * byte offset @off.  Propagates memcpy_from_pmem()'s return value
 * (0 on success, negative on a media error).  The atomic mapping is
 * held only for the duration of the copy.
 */
static int read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	void *kaddr = kmap_atomic(page);
	int ret = memcpy_from_pmem(kaddr + off, pmem_addr, len);

	kunmap_atomic(kaddr);
	return ret;
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
unsigned int len, unsigned int off, bool is_write, unsigned int len, unsigned int off, bool is_write,
sector_t sector) sector_t sector)
{ {
int rc = 0; int rc = 0;
bool bad_pmem = false; bool bad_pmem = false;
void *mem = kmap_atomic(page);
phys_addr_t pmem_off = sector * 512 + pmem->data_offset; phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
void *pmem_addr = pmem->virt_addr + pmem_off; void *pmem_addr = pmem->virt_addr + pmem_off;
...@@ -83,7 +102,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, ...@@ -83,7 +102,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(bad_pmem)) if (unlikely(bad_pmem))
rc = -EIO; rc = -EIO;
else { else {
rc = memcpy_from_pmem(mem + off, pmem_addr, len); rc = read_pmem(page, off, pmem_addr, len);
flush_dcache_page(page); flush_dcache_page(page);
} }
} else { } else {
...@@ -102,14 +121,13 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, ...@@ -102,14 +121,13 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
* after clear poison. * after clear poison.
*/ */
flush_dcache_page(page); flush_dcache_page(page);
memcpy_to_pmem(pmem_addr, mem + off, len); write_pmem(pmem_addr, page, off, len);
if (unlikely(bad_pmem)) { if (unlikely(bad_pmem)) {
pmem_clear_poison(pmem, pmem_off, len); pmem_clear_poison(pmem, pmem_off, len);
memcpy_to_pmem(pmem_addr, mem + off, len); write_pmem(pmem_addr, page, off, len);
} }
} }
kunmap_atomic(mem);
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment