Commit b22870ba authored by Alexandre Courbot, committed by Ben Skeggs

drm/nouveau: synchronize BOs when required

On architectures for which access to GPU memory is non-coherent,
caches need to be flushed and invalidated explicitly when BO control
changes between CPU and GPU.

This patch adds buffer synchronization functions which invoke the
correct API (PCI or DMA) to ensure synchronization is effective.

Based on the TTM DMA cache helper patches by Lucas Stach.
Signed-off-by: Lucas Stach <dev@lynxeye.de>
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent a81349a7
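For readers unfamiliar with the streaming-DMA model the new helpers rely on, the sketch below illustrates the two cache-maintenance directions the commit message refers to ("flush" and "invalidate"). It is illustrative only, not nouveau code: the device pointer, bus address and size are assumed to describe an existing streaming mapping (e.g. from dma_map_single()), and the function names are invented for the example.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/dma-mapping.h>

static void example_gpu_wrote_cpu_reads(struct device *dev,
                                        dma_addr_t bus_addr, size_t size)
{
        /* "Invalidate": drop stale CPU cache lines so the CPU sees the
         * data the device just wrote through bus_addr. */
        dma_sync_single_for_cpu(dev, bus_addr, size, DMA_FROM_DEVICE);
}

static void example_cpu_wrote_gpu_reads(struct device *dev,
                                        dma_addr_t bus_addr, size_t size)
{
        /* "Flush": write dirty CPU cache lines back to memory so the
         * device sees the data the CPU just wrote. */
        dma_sync_single_for_device(dev, bus_addr, size, DMA_TO_DEVICE);
}

The new nouveau_bo_sync_for_cpu()/nouveau_bo_sync_for_device() helpers added below apply exactly this pattern, page by page, over a BO's ttm_dma_tt mapping.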
@@ -426,6 +426,46 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
        ttm_bo_kunmap(&nvbo->kmap);
}

void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nouveau_device *device = nvkm_device(&drm->device);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_device(nv_device_base(device),
                        ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nouveau_device *device = nvkm_device(&drm->device);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_cpu(nv_device_base(device),
                        ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
@@ -437,6 +477,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
        if (ret)
                return ret;

        nouveau_bo_sync_for_device(nvbo);

        return 0;
}
@@ -85,6 +85,8 @@ void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
                        bool no_wait_gpu);
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
@@ -870,6 +870,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                else
                        ret = lret;
        }
        nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_unreference_unlocked(gem);

        return ret;
@@ -879,6 +880,17 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_fini *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_device(nvbo);
        drm_gem_object_unreference_unlocked(gem);
        return 0;
}
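To see the hooks above in context, here is a hedged sketch of the userspace side of the CPU_PREP/CPU_FINI hand-off this patch plugs into, assuming the standard nouveau uapi headers; the DRM fd, BO handle and mmap'ed pointer are assumed to have been set up elsewhere, and error handling is minimal.

/* Hypothetical caller, not part of this patch: the CPU writes to a BO
 * between CPU_PREP (kernel waits for the GPU and calls
 * nouveau_bo_sync_for_cpu()) and CPU_FINI (kernel calls
 * nouveau_bo_sync_for_device()). */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

static int bo_cpu_write(int drm_fd, uint32_t bo_handle, void *map, size_t len)
{
        struct drm_nouveau_gem_cpu_prep prep = {
                .handle = bo_handle,
                .flags  = NOUVEAU_GEM_CPU_PREP_WRITE,
        };
        struct drm_nouveau_gem_cpu_fini fini = {
                .handle = bo_handle,
        };

        if (ioctl(drm_fd, DRM_IOCTL_NOUVEAU_GEM_CPU_PREP, &prep))
                return -1;

        memset(map, 0, len);    /* CPU owns the buffer here */

        return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_GEM_CPU_FINI, &fini);
}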