Commit 284562e1 authored by Gurchetan Singh, committed by Gerd Hoffmann

udmabuf: implement begin_cpu_access/end_cpu_access hooks

With the misc device, we should end up using the result of
get_arch_dma_ops(..) or dma-direct ops.

This can allow us to have WC mappings in the guest after
synchronization.
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-4-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 17a7ce20
@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
 struct udmabuf {
 	pgoff_t pagecount;
 	struct page **pages;
+	struct sg_table *sg;
 	struct miscdevice *device;
 };

@@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 static void release_udmabuf(struct dma_buf *buf)
 {
 	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
 	pgoff_t pg;

+	if (ubuf->sg)
+		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		put_page(ubuf->pages[pg]);
 	kfree(ubuf->pages);
 	kfree(ubuf);
 }

+static int begin_cpu_udmabuf(struct dma_buf *buf,
+			     enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg) {
+		ubuf->sg = get_sg_table(dev, buf, direction);
+		if (IS_ERR(ubuf->sg))
+			return PTR_ERR(ubuf->sg);
+	} else {
+		dma_sync_sg_for_device(dev, ubuf->sg->sgl,
+				       ubuf->sg->nents,
+				       direction);
+	}
+
+	return 0;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+			   enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg)
+		return -EINVAL;
+
+	dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	return 0;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
 	.cache_sgt_mapping = true,
 	.map_dma_buf = map_udmabuf,
 	.unmap_dma_buf = unmap_udmabuf,
 	.release = release_udmabuf,
 	.mmap = mmap_udmabuf,
+	.begin_cpu_access = begin_cpu_udmabuf,
+	.end_cpu_access = end_cpu_udmabuf,
 };

 #define SEALS_WANTED (F_SEAL_SHRINK)
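For context, these hooks are what the kernel reaches when userspace brackets CPU access with the standard DMA_BUF_IOCTL_SYNC ioctl on the exported dma-buf fd: DMA_BUF_SYNC_START lands in begin_cpu_udmabuf() and DMA_BUF_SYNC_END in end_cpu_udmabuf(). Below is a minimal sketch of a userspace consumer driving them; it assumes dmabuf_fd was obtained elsewhere (e.g. via UDMABUF_CREATE on /dev/udmabuf), that the buffer is len bytes long, and it trims most error handling.

/* Sketch: bracket CPU writes to a udmabuf-exported buffer with
 * DMA_BUF_IOCTL_SYNC so the driver can sync its scatterlist.
 * Assumes dmabuf_fd and len come from elsewhere (hypothetical caller).
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int fill_buffer(int dmabuf_fd, size_t len, uint8_t pattern)
{
	struct dma_buf_sync sync = { 0 };
	void *map;

	map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		   dmabuf_fd, 0);
	if (map == MAP_FAILED)
		return -1;

	/* DMA_BUF_SYNC_START ends up in begin_cpu_udmabuf() */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(map, pattern, len);        /* CPU access happens here */

	/* DMA_BUF_SYNC_END ends up in end_cpu_udmabuf() */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(map, len);
	return 0;
}

Note the lazy allocation visible in the diff: the first begin_cpu_access creates the sg_table via get_sg_table(), and subsequent calls only perform dma_sync_sg_for_device()/dma_sync_sg_for_cpu() on the cached table, which release_udmabuf() then drops with put_sg_table().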