Commit 945b584c authored by Dave Airlie

Merge branch 'linux-5.4' of git://github.com/skeggsb/linux into drm-next

A couple of fixes from Thierry addressing issues caused by the reservation
object rework this cycle, as well as a fix from Lyude to allow the driver
to load on the ThinkPad P71.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Ben Skeggs <skeggsb@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CACAvsv5bLthhq7kh04A0JKxGnBdOTCxiu0hs7FZ1x3_9Rc9YoA@mail.gmail.com
parents 023941cf b568db62
@@ -1599,7 +1599,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		nv_encoder->aux = aux;
 	}
 
-	if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
+	if (nv_connector->type != DCB_CONNECTOR_eDP &&
+	    (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
 	    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
 		ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
 				    nv_connector->base.base.id,
...
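The hunk above is presumably the fix from Lyude referenced in the merge description: the VBIOS DP table lookup, and with it the creation of an MST topology manager via nv50_mstm_new(), is now skipped entirely for eDP connectors. Condensed for readability (names taken from the hunk; the nv50_mstm_new() call and its arguments are elided):

	if (nv_connector->type != DCB_CONNECTOR_eDP &&
	    (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
	    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
		/* nv50_mstm_new() is only reached for non-eDP SORs */
	}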
@@ -136,10 +136,16 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (unlikely(nvbo->bo.base.filp))
-		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 	WARN_ON(nvbo->pin_refcnt > 0);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+
+	/*
+	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
+	 * initialized, so don't attempt to release it.
+	 */
+	if (bo->base.dev)
+		drm_gem_object_release(&bo->base);
+
 	kfree(nvbo);
 }
@@ -186,8 +192,8 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 }
 
 struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
-		 u32 tile_flags)
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+		 u32 tile_mode, u32 tile_flags)
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
@@ -195,8 +201,8 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
 	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
 	int i, pi = -1;
 
-	if (!size) {
-		NV_WARN(drm, "skipped size %016llx\n", size);
+	if (!*size) {
+		NV_WARN(drm, "skipped size %016llx\n", *size);
 		return ERR_PTR(-EINVAL);
 	}
@@ -266,7 +272,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
 		pi = i;
 
 		/* Stop once the buffer is larger than the current page size. */
-		if (size >= 1ULL << vmm->page[i].shift)
+		if (*size >= 1ULL << vmm->page[i].shift)
 			break;
 	}
@@ -281,6 +287,8 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
 	}
 	nvbo->page = vmm->page[pi].shift;
 
+	nouveau_bo_fixup_align(nvbo, flags, align, size);
+
 	return nvbo;
 }
@@ -294,7 +302,6 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
 
-	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
@@ -318,7 +325,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	struct nouveau_bo *nvbo;
 	int ret;
 
-	nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+				tile_flags);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
...
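Taken together, these hunks move the GEM-side teardown into the TTM destructor and fold the size/alignment fixup into nouveau_bo_alloc(). The bo->base.dev test works because drm_gem_object_init() is what fills in the embedded GEM object's dev pointer; a buffer created through nouveau_bo_new() never goes through GEM initialization, so the pointer stays NULL. A hypothetical helper (not part of the patch) that spells out what the destructor is checking:

	/*
	 * Illustration only: drm_gem_object_init() sets bo->base.dev, so a
	 * NULL dev means the embedded GEM object was never initialized and
	 * drm_gem_object_release() must not be called on it.
	 */
	static bool nouveau_bo_has_gem(struct ttm_buffer_object *bo)
	{
		return bo->base.dev != NULL;
	}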
@@ -71,8 +71,8 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 extern struct ttm_bo_driver nouveau_bo_driver;
 
 void nouveau_bo_move_init(struct nouveau_drm *);
-struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 size, u32 flags,
-				    u32 tile_mode, u32 tile_flags);
+struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
+				    u32 flags, u32 tile_mode, u32 tile_flags);
 int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
 		    struct sg_table *sg, struct dma_resv *robj);
 int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
...
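With the new prototype, nouveau_bo_alloc() takes size and align by reference and, as the nouveau_bo.c hunks show, performs nouveau_bo_fixup_align() itself, so callers must use the adjusted values for the GEM and TTM initialization that follow. A caller-side sketch of the resulting flow, loosely mirroring the prime import path updated further down; cli, dev, flags, tile_mode, tile_flags, sg and robj stand in for caller context, and error handling is condensed:

	u64 size = requested_size;	/* may be rounded up by nouveau_bo_alloc() */
	int align = requested_align;	/* may be adjusted for the chosen page size */
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode, tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* both of these must see the adjusted size and alignment */
	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
	if (ret == 0)
		ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
	if (ret)
		nouveau_bo_ref(NULL, &nvbo);
	return ret;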
@@ -51,10 +51,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 	if (gem->import_attach)
 		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-	drm_gem_object_release(gem);
-
-	/* reset filp so nouveau_bo_del_ttm() can test for it */
-	gem->filp = NULL;
 	ttm_bo_put(&nvbo->bo);
 
 	pm_runtime_mark_last_busy(dev);
@@ -188,7 +184,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
 		flags |= TTM_PL_FLAG_UNCACHED;
 
-	nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+				tile_flags);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
...
@@ -60,19 +60,22 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 							 struct sg_table *sg)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_gem_object *obj;
 	struct nouveau_bo *nvbo;
 	struct dma_resv *robj = attach->dmabuf->resv;
-	size_t size = attach->dmabuf->size;
+	u64 size = attach->dmabuf->size;
 	u32 flags = 0;
+	int align = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
 	dma_resv_lock(robj, NULL);
-	nvbo = nouveau_bo_alloc(&drm->client, size, flags, 0, 0);
-	dma_resv_unlock(robj);
-	if (IS_ERR(nvbo))
-		return ERR_CAST(nvbo);
+	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
+	if (IS_ERR(nvbo)) {
+		obj = ERR_CAST(nvbo);
+		goto unlock;
+	}
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
@@ -81,16 +84,22 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
-		return ERR_PTR(-ENOMEM);
+		obj = ERR_PTR(-ENOMEM);
+		goto unlock;
 	}
 
-	ret = nouveau_bo_init(nvbo, size, 0, flags, sg, robj);
+	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
-		return ERR_PTR(ret);
+		obj = ERR_PTR(ret);
+		goto unlock;
 	}
 
-	return &nvbo->bo.base;
+	obj = &nvbo->bo.base;
+
+unlock:
+	dma_resv_unlock(robj);
+	return obj;
 }
...
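The net effect of the two hunks above is that the DMA-buf's reservation object stays locked across both drm_gem_object_init() and nouveau_bo_init(), with every path leaving through a single unlock label. Assembled from the hunks as a reading aid (declarations and the unchanged lines between the hunks are omitted; this is not the verbatim source):

	dma_resv_lock(robj, NULL);

	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
	if (IS_ERR(nvbo)) {
		obj = ERR_CAST(nvbo);
		goto unlock;
	}

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;

	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		obj = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		obj = ERR_PTR(ret);
		goto unlock;
	}

	obj = &nvbo->bo.base;

unlock:
	dma_resv_unlock(robj);
	return obj;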
@@ -26,7 +26,6 @@ gm20b_bar_func = {
 	.dtor = gf100_bar_dtor,
 	.oneinit = gf100_bar_oneinit,
 	.bar1.init = gf100_bar_bar1_init,
-	.bar1.fini = gf100_bar_bar1_fini,
 	.bar1.wait = gm107_bar_bar1_wait,
 	.bar1.vmm = gf100_bar_bar1_vmm,
 	.flush = g84_bar_flush,
...