Commit 551199ac authored by Bart Van Assche, committed by Doug Ledford

lib/dma-virt: Add dma_virt_ops

Several RDMA drivers (hfi1, qib and rxe) expect that ib_sge.addr
is a virtual address. Provide DMA mapping operations that are
suitable for these drivers.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 7844572c
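For context, a minimal, hypothetical sketch (not part of this commit) of how a driver could route a device's DMA mapping calls through these operations. It assumes the per-device dma_ops pointer in struct device introduced in the same development cycle; the helper name is made up:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: after this, dma_map_*() calls on @dev go through
 * dma_virt_ops and return kernel virtual addresses as DMA handles. */
static void example_use_dma_virt_ops(struct device *dev)
{
	dev->dma_ops = &dma_virt_ops;
}

The hfi1, qib and rxe conversions mentioned in the commit message would do something along these lines for the struct device embedded in their ib_device, so that ib_sge.addr can keep holding virtual addresses.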
include/linux/dma-mapping.h
@@ -128,6 +128,7 @@ struct dma_map_ops {
};

extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
lib/Kconfig
@@ -400,6 +400,11 @@ config DMA_NOOP_OPS
	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
	default n

config DMA_VIRT_OPS
	bool
	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
	default n

config CHECK_SIGNATURE
	bool
lib/Makefile
@@ -27,6 +27,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
obj-y += lockref.o

lib/dma-virt.c (new file)
/*
* lib/dma-virt.c
*
* DMA operations that map to virtual addresses without flushing memory.
*/
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Allocate memory with __get_free_pages() and report the allocation's
 * kernel virtual address as the DMA handle.
 */
static void *dma_virt_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp,
			    unsigned long attrs)
{
	void *ret;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret)
		*dma_handle = (uintptr_t)ret;
	return ret;
}

static void dma_virt_free(struct device *dev, size_t size,
			  void *cpu_addr, dma_addr_t dma_addr,
			  unsigned long attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
 * "Map" a page by returning its kernel virtual address plus the offset;
 * no IOMMU programming or cache maintenance takes place.
 */
static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	return (uintptr_t)(page_address(page) + offset);
}

/*
 * Map a scatterlist by storing each segment's kernel virtual address in
 * its DMA address field; mapping never fails.
 */
static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

/* Mapping through dma_virt_ops cannot fail, so never report an error. */
static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}

/* Accept any DMA mask; addresses are never translated to bus addresses. */
static int dma_virt_supported(struct device *dev, u64 mask)
{
	return true;
}

const struct dma_map_ops dma_virt_ops = {
	.alloc = dma_virt_alloc,
	.free = dma_virt_free,
	.map_page = dma_virt_map_page,
	.map_sg = dma_virt_map_sg,
	.mapping_error = dma_virt_mapping_error,
	.dma_supported = dma_virt_supported,
};
EXPORT_SYMBOL(dma_virt_ops);
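
To illustrate what these operations produce, here is a hypothetical sketch (not part of the commit) of a mapping on a device whose dma_ops already point at dma_virt_ops: dma_map_single() ends up in dma_virt_map_page() above, so the returned handle is simply the buffer's kernel virtual address. The function name and buffer size are made up:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical example: map a kmalloc'ed buffer on a device that uses
 * dma_virt_ops. */
static int example_map_buffer(struct device *dev)
{
	size_t len = 4096;
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t dma;

	if (!buf)
		return -ENOMEM;

	dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	/* With dma_virt_ops the handle is the virtual address itself. */
	WARN_ON(dma != (uintptr_t)buf);

	dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
	kfree(buf);
	return 0;
}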