Commit 62161778 authored by Dave Airlie

drm/vmwgfx/ttm: switch gmrid allocator to new init paths.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-22-airlied@gmail.com
parent 252f8d7b
...@@ -896,14 +896,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -896,14 +896,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* slots as well as the bo size. * slots as well as the bo size.
*/ */
dev_priv->has_gmr = true; dev_priv->has_gmr = true;
dev_priv->bdev.man[VMW_PL_GMR].func = &vmw_gmrid_manager_func;
dev_priv->bdev.man[VMW_PL_GMR].available_caching = TTM_PL_FLAG_CACHED;
dev_priv->bdev.man[VMW_PL_GMR].default_caching = TTM_PL_FLAG_CACHED;
/* TODO: This is most likely not correct */ /* TODO: This is most likely not correct */
dev_priv->bdev.man[VMW_PL_GMR].use_tt = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, refuse_dma ||
VMW_PL_GMR) != 0) { vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. " DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n"); "Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false; dev_priv->has_gmr = false;
...@@ -911,13 +907,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -911,13 +907,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) { if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true; dev_priv->has_mob = true;
dev_priv->bdev.man[VMW_PL_MOB].func = &vmw_gmrid_manager_func;
dev_priv->bdev.man[VMW_PL_MOB].available_caching = TTM_PL_FLAG_CACHED; if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
dev_priv->bdev.man[VMW_PL_MOB].default_caching = TTM_PL_FLAG_CACHED;
/* TODO: This is most likely not correct */
dev_priv->bdev.man[VMW_PL_MOB].use_tt = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
VMW_PL_MOB) != 0) {
DRM_INFO("No MOB memory available. " DRM_INFO("No MOB memory available. "
"3D will be disabled.\n"); "3D will be disabled.\n");
dev_priv->has_mob = false; dev_priv->has_mob = false;
......
...@@ -1221,7 +1221,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); ...@@ -1221,7 +1221,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
* GMR Id manager * GMR Id manager
*/ */
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
/** /**
* Prime - vmwgfx_prime.c * Prime - vmwgfx_prime.c
......
...@@ -94,22 +94,28 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, ...@@ -94,22 +94,28 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
} }
} }
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
unsigned long p_size)
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{ {
struct vmw_private *dev_priv = struct ttm_mem_type_manager *man = &dev_priv->bdev.man[type];
container_of(man->bdev, struct vmw_private, bdev);
struct vmwgfx_gmrid_man *gman = struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL); kzalloc(sizeof(*gman), GFP_KERNEL);
if (unlikely(!gman)) if (unlikely(!gman))
return -ENOMEM; return -ENOMEM;
man->func = &vmw_gmrid_manager_func;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
/* TODO: This is most likely not correct */
man->use_tt = true;
ttm_mem_type_manager_init(&dev_priv->bdev, man, 0);
spin_lock_init(&gman->lock); spin_lock_init(&gman->lock);
gman->used_gmr_pages = 0; gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida); ida_init(&gman->gmr_ida);
switch (p_size) { switch (type) {
case VMW_PL_GMR: case VMW_PL_GMR:
gman->max_gmr_ids = dev_priv->max_gmr_ids; gman->max_gmr_ids = dev_priv->max_gmr_ids;
gman->max_gmr_pages = dev_priv->max_gmr_pages; gman->max_gmr_pages = dev_priv->max_gmr_pages;
...@@ -122,6 +128,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, ...@@ -122,6 +128,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
BUG(); BUG();
} }
man->priv = (void *) gman; man->priv = (void *) gman;
ttm_mem_type_manager_set_used(man, true);
return 0; return 0;
} }
...@@ -137,8 +145,7 @@ static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man) ...@@ -137,8 +145,7 @@ static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
return 0; return 0;
} }
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
.init = vmw_gmrid_man_init,
.takedown = vmw_gmrid_man_takedown, .takedown = vmw_gmrid_man_takedown,
.get_node = vmw_gmrid_man_get_node, .get_node = vmw_gmrid_man_get_node,
.put_node = vmw_gmrid_man_put_node, .put_node = vmw_gmrid_man_put_node,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment