Commit 878c6ecd authored by Deepak Rawat, committed by Roland Scheidegger

drm/vmwgfx: Use enum to represent graphics context capabilities

Instead of having separate bools in the device private structure to represent
incremental graphics context capabilities, add a new SM type enum.

v2: Use enum instead of bit flag.

v3: Incorporated review comments.
Signed-off-by: Deepak Rawat <drawat.floss@gmail.com>
Reviewed-by: Thomas Hellström (VMware) <thomas_os@shipmail.org>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Signed-off-by: Roland Scheidegger <sroland@vmware.com>
parent 3d143954
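
The change works because the enum values are ordered: each new level implies
all earlier ones, so a single comparison such as sm_type >= VMW_SM_4 replaces
the separate has_dx/has_sm4_1 bools. Below is a minimal standalone sketch of
that pattern (simplified and not part of the commit; the main() harness is
only for illustration):

#include <stdbool.h>
#include <stdio.h>

enum vmw_sm_type {
        VMW_SM_LEGACY = 0,
        VMW_SM_4,
        VMW_SM_4_1,
        VMW_SM_MAX
};

struct vmw_private {
        enum vmw_sm_type sm_type;
};

/* Because the levels are ordered, a device at SM4_1 also satisfies
 * every lower capability check with a single >= comparison. */
static inline bool has_sm4_context(const struct vmw_private *dev_priv)
{
        return dev_priv->sm_type >= VMW_SM_4;
}

static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
{
        return dev_priv->sm_type >= VMW_SM_4_1;
}

int main(void)
{
        struct vmw_private dev = { .sm_type = VMW_SM_4_1 };

        /* Prints "SM4: yes" then "SM4_1: yes". */
        printf("SM4: %s\n", has_sm4_context(&dev) ? "yes" : "no");
        printf("SM4_1: %s\n", has_sm4_1_context(&dev) ? "yes" : "no");
        return 0;
}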
@@ -731,7 +731,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 	};
 	int ret;
 
-	if (!dev_priv->has_dx && dx) {
+	if (!has_sm4_context(dev_priv) && dx) {
 		VMW_DEBUG_USER("DX contexts not supported by device.\n");
 		return -EINVAL;
 	}
...
@@ -449,7 +449,7 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
 	if (IS_ERR(dev_priv->cman)) {
 		dev_priv->cman = NULL;
-		dev_priv->has_dx = false;
+		dev_priv->sm_type = VMW_SM_LEGACY;
 	}
 
 	ret = vmw_request_device_late(dev_priv);
@@ -886,11 +886,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
 		spin_lock(&dev_priv->cap_lock);
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
-		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+			dev_priv->sm_type = VMW_SM_4;
 		spin_unlock(&dev_priv->cap_lock);
 	}
 
 	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
 
+	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
+	if (has_sm4_context(dev_priv) &&
+	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
+		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+			dev_priv->sm_type = VMW_SM_4_1;
+	}
+
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
@@ -900,23 +911,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (ret)
 		goto out_no_fifo;
 
-	if (dev_priv->has_dx) {
-		/*
-		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
-		 * support
-		 */
-		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
-			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
-				  SVGA3D_DEVCAP_SM41);
-			dev_priv->has_sm4_1 = vmw_read(dev_priv,
-						       SVGA_REG_DEV_CAP);
-		}
-	}
-
-	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
 	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
 		 ? "yes." : "no.");
-	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
+	if (dev_priv->sm_type == VMW_SM_4_1)
+		DRM_INFO("SM4_1 support available.\n");
+	if (dev_priv->sm_type == VMW_SM_4)
+		DRM_INFO("SM4 support available.\n");
 
 	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
 		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
...
@@ -441,6 +441,20 @@ enum {
 	VMW_IRQTHREAD_MAX
 };
 
+/**
+ * enum vmw_sm_type - Graphics context capability supported by device.
+ * @VMW_SM_LEGACY: Pre-DX context.
+ * @VMW_SM_4: Context support up to SM4.
+ * @VMW_SM_4_1: Context support up to SM4_1.
+ * @VMW_SM_MAX: Should be the last.
+ */
+enum vmw_sm_type {
+	VMW_SM_LEGACY = 0,
+	VMW_SM_4,
+	VMW_SM_4_1,
+	VMW_SM_MAX
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
@@ -475,9 +489,9 @@ struct vmw_private {
 	bool has_mob;
 	spinlock_t hw_lock;
 	spinlock_t cap_lock;
-	bool has_dx;
 	bool assume_16bpp;
-	bool has_sm4_1;
+
+	enum vmw_sm_type sm_type;
 
 	/*
 	 * Framebuffer info.
@@ -648,6 +662,28 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 	return val;
 }
 
+/**
+ * has_sm4_context - Does the device support SM4 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4 context.
+ */
+static inline bool has_sm4_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_4);
+}
+
+/**
+ * has_sm4_1_context - Does the device support SM4_1 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4_1 context.
+ */
+static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_4_1);
+}
+
 extern void vmw_svga_enable(struct vmw_private *dev_priv);
 extern void vmw_svga_disable(struct vmw_private *dev_priv);
...
@@ -461,7 +461,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	u32 i;
 
 	/* Add all cotables to the validation list. */
-	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+	if (has_sm4_context(dev_priv) &&
+	    vmw_res_type(ctx) == vmw_res_dx_context) {
 		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
 			res = vmw_context_cotable(ctx, i);
 			if (IS_ERR(res))
@@ -489,7 +490,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 		break;
 	}
 
-	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+	if (has_sm4_context(dev_priv) &&
+	    vmw_res_type(ctx) == vmw_res_dx_context) {
 		struct vmw_buffer_object *dx_query_mob;
 
 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
...
@@ -114,10 +114,10 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 			(dev_priv->active_display_unit == vmw_du_screen_target);
 		break;
 	case DRM_VMW_PARAM_DX:
-		param->value = dev_priv->has_dx;
+		param->value = has_sm4_context(dev_priv);
 		break;
 	case DRM_VMW_PARAM_SM4_1:
-		param->value = dev_priv->has_sm4_1;
+		param->value = has_sm4_1_context(dev_priv);
 		break;
 	default:
 		return -EINVAL;
...
@@ -941,7 +941,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	 * For DX, surface format validation is done when surface->scanout
 	 * is set.
 	 */
-	if (!dev_priv->has_dx && format != surface->format) {
+	if (!has_sm4_context(dev_priv) && format != surface->format) {
 		DRM_ERROR("Invalid surface format for requested mode.\n");
 		return -EINVAL;
 	}
...
@@ -320,7 +320,7 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
 	int ret;
 
-	if (dev_priv->has_dx) {
+	if (has_sm4_context(dev_priv)) {
 		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
 		if (!(*otables))
 			return -ENOMEM;
...
@@ -1092,12 +1092,12 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+	if (has_sm4_1_context(dev_priv) && srf->array_size > 0) {
 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
 		cmd_len = sizeof(cmd3->body);
 		submit_len = sizeof(*cmd3);
 	} else if (srf->array_size > 0) {
-		/* has_dx checked on creation time. */
+		/* VMW_SM_4 support verified at creation time. */
 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
 		cmd_len = sizeof(cmd2->body);
 		submit_len = sizeof(*cmd2);
@@ -1115,7 +1115,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+	if (has_sm4_1_context(dev_priv) && srf->array_size > 0) {
 		cmd3->header.id = cmd_id;
 		cmd3->header.size = cmd_len;
 		cmd3->body.sid = srf->res.id;
@@ -1443,7 +1443,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
 	}
 
 	/* array_size must be null for non-GL3 host. */
-	if (array_size > 0 && !dev_priv->has_dx) {
+	if (array_size > 0 && !has_sm4_context(dev_priv)) {
 		VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
 		return -EINVAL;
 	}
@@ -1601,7 +1601,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
 				req->base.svga3d_flags);
 
-	if (!dev_priv->has_sm4_1) {
+	if (!has_sm4_1_context(dev_priv)) {
 		/*
 		 * If SM4_1 is not support then cannot send 64-bit flag to
 		 * device.
...