Commit 0716e348 authored by Ian Forbes, committed by Zack Rusin

drm/vmwgfx: Remove unused code

Remove unused structs, struct members, and an unused file. Many of these
members are written but never read.
Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240214210440.26167-1-ian.forbes@broadcom.com
parent d1806492
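
For illustration of the "written but never read" pattern this commit targets, here is a minimal hypothetical sketch (not code from the patch itself): a member such as cursor_bo is assigned on every cursor update but never read back, so both the member and the assignment can be deleted without functional change.

/* Hypothetical, trimmed-down illustration; not the real vmw_display_unit. */
struct vmw_surface;
struct vmw_bo;

struct example_display_unit {
	struct vmw_surface *cursor_surface;	/* read by other code: kept */
	struct vmw_bo *cursor_bo;		/* only ever written: removed below */
};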
@@ -87,14 +87,11 @@ struct ttm_object_file {
  *
  * @object_lock: lock that protects idr.
  *
- * @object_count: Per device object count.
- *
  * This is the per-device data structure needed for ttm object management.
  */
 struct ttm_object_device {
 	spinlock_t object_lock;
-	atomic_t object_count;
 	struct dma_buf_ops ops;
 	void (*dmabuf_release)(struct dma_buf *dma_buf);
 	struct idr idr;
@@ -431,7 +428,6 @@ ttm_object_device_init(const struct dma_buf_ops *ops)
 		return NULL;
 	spin_lock_init(&tdev->object_lock);
-	atomic_set(&tdev->object_count, 0);
 	/*
 	 * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
...
@@ -117,25 +117,8 @@ struct vmwgfx_hash_item {
 	unsigned long key;
 };
-/**
- * struct vmw_validate_buffer - Carries validation info about buffers.
- *
- * @base: Validation info for TTM.
- * @hash: Hash entry for quick lookup of the TTM buffer object.
- *
- * This structure contains also driver private validation info
- * on top of the info needed by TTM.
- */
-struct vmw_validate_buffer {
-	struct ttm_validate_buffer base;
-	struct vmwgfx_hash_item hash;
-	bool validate_as_mob;
-};
 struct vmw_res_func;
 /**
  * struct vmw-resource - base class for hardware resources
  *
@@ -445,15 +428,6 @@ struct vmw_sw_context{
 struct vmw_legacy_display;
 struct vmw_overlay;
-struct vmw_vga_topology_state {
-	uint32_t width;
-	uint32_t height;
-	uint32_t primary;
-	uint32_t pos_x;
-	uint32_t pos_y;
-};
 /*
  * struct vmw_otable - Guest Memory OBject table metadata
  *
@@ -501,7 +475,6 @@ struct vmw_private {
 	struct drm_device drm;
 	struct ttm_device bdev;
-	struct drm_vma_offset_manager vma_manager;
 	u32 pci_id;
 	resource_size_t io_start;
 	resource_size_t vram_start;
...
@@ -775,7 +775,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	hotspot_y = du->hotspot_y + new_state->hotspot_y;
 	du->cursor_surface = vps->surf;
-	du->cursor_bo = vps->bo;
 	if (!vps->surf && !vps->bo) {
 		vmw_cursor_update_position(dev_priv, false, 0, 0);
@@ -858,15 +857,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 						  DRM_PLANE_NO_SCALING,
 						  DRM_PLANE_NO_SCALING,
 						  false, true);
-	if (!ret && new_fb) {
-		struct drm_crtc *crtc = new_state->crtc;
-		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-		vmw_connector_state_to_vcs(du->connector.state);
-	}
 	return ret;
 }
@@ -1361,7 +1351,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
 	vfbs->surface = vmw_surface_reference(surface);
-	vfbs->base.user_handle = mode_cmd->handles[0];
 	vfbs->is_bo_proxy = is_bo_proxy;
 	*out = &vfbs->base;
@@ -1529,7 +1518,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
 	vfbd->base.bo = true;
 	vfbd->buffer = vmw_bo_reference(bo);
-	vfbd->base.user_handle = mode_cmd->handles[0];
 	*out = &vfbd->base;
 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
...
@@ -217,21 +217,11 @@ struct vmw_kms_dirty {
 struct vmw_framebuffer {
 	struct drm_framebuffer base;
 	bool bo;
-	uint32_t user_handle;
-};
-/*
- * Clip rectangle
- */
-struct vmw_clip_rect {
-	int x1, x2, y1, y2;
 };
 struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
-	struct vmw_bo *buffer;
-	struct list_head head;
 	bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
 };
@@ -359,7 +349,6 @@ struct vmw_display_unit {
 	struct vmw_cursor_plane cursor;
 	struct vmw_surface *cursor_surface;
-	struct vmw_bo *cursor_bo;
 	size_t cursor_age;
 	int cursor_x;
@@ -389,11 +378,6 @@ struct vmw_display_unit {
 	int set_gui_y;
 };
-struct vmw_validation_ctx {
-	struct vmw_resource *res;
-	struct vmw_bo *buf;
-};
 #define vmw_crtc_to_du(x) \
 	container_of(x, struct vmw_display_unit, crtc)
 #define vmw_connector_to_du(x) \
...
...@@ -89,7 +89,6 @@ struct vmw_kms_sou_define_gmrfb { ...@@ -89,7 +89,6 @@ struct vmw_kms_sou_define_gmrfb {
struct vmw_screen_object_unit { struct vmw_screen_object_unit {
struct vmw_display_unit base; struct vmw_display_unit base;
unsigned long buffer_size; /**< Size of allocated buffer */
struct vmw_bo *buffer; /**< Backing store buffer */ struct vmw_bo *buffer; /**< Backing store buffer */
bool defined; bool defined;
...@@ -240,7 +239,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) ...@@ -240,7 +239,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
int x, y; int x, y;
sou->buffer = vps->bo; sou->buffer = vps->bo;
sou->buffer_size = vps->bo_size;
conn_state = sou->base.connector.state; conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state); vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
...@@ -255,7 +253,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) ...@@ -255,7 +253,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
} else { } else {
sou->buffer = NULL; sou->buffer = NULL;
sou->buffer_size = 0;
} }
} }
......
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
static int vmw_bo_vm_lookup(struct ttm_device *bdev,
			    struct drm_file *filp,
			    unsigned long offset,
			    unsigned long pages,
			    struct ttm_buffer_object **p_bo)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
	struct drm_device *drm = &dev_priv->drm;
	struct drm_vma_offset_node *node;
	int ret;

	*p_bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (likely(node)) {
		*p_bo = container_of(node, struct ttm_buffer_object,
				     base.vma_node);
		*p_bo = ttm_bo_get_unless_zero(*p_bo);
	}

	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (!*p_bo) {
		drm_err(drm, "Could not find buffer object to map\n");
		return -EINVAL;
	}

	if (!drm_vma_node_is_allowed(node, filp)) {
		ret = -EACCES;
		goto out_no_access;
	}

	return 0;

out_no_access:
	ttm_bo_put(*p_bo);
	return ret;
}

int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
	static const struct vm_operations_struct vmw_vm_ops = {
		.pfn_mkwrite = vmw_bo_vm_mkwrite,
		.page_mkwrite = vmw_bo_vm_mkwrite,
		.fault = vmw_bo_vm_fault,
		.open = ttm_bo_vm_open,
		.close = ttm_bo_vm_close,
	};
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
	struct ttm_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_mmap_obj(vma, bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &vmw_vm_ops;

	/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
	if (!is_cow_mapping(vma->vm_flags))
		vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);

	ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */

	return 0;

out_unref:
	ttm_bo_put(bo);
	return ret;
}
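
The removed file above duplicated buffer-object mmap plumbing that the generic DRM GEM helpers also provide: drm_gem_mmap() performs the same VMA-offset lookup and drm_vma_node_is_allowed() access check that vmw_bo_vm_lookup() did by hand. As a hedged sketch (an assumption for illustration, not code from this patch), a driver whose buffer objects are GEM objects can simply point its file_operations at the generic helper:

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

/*
 * DEFINE_DRM_GEM_FOPS() expands to a static const struct file_operations
 * whose .mmap is drm_gem_mmap(), so no driver-specific mmap wrapper is
 * needed. "example_gem_fops" is an illustrative name only.
 */
DEFINE_DRM_GEM_FOPS(example_gem_fops);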
...@@ -32,9 +32,6 @@ ...@@ -32,9 +32,6 @@
#include <linux/slab.h> #include <linux/slab.h>
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
/** /**
* struct vmw_validation_bo_node - Buffer object validation metadata. * struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation. * @base: Metadata used for TTM reservation- and validation.
...@@ -112,20 +109,10 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, ...@@ -112,20 +109,10 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL; return NULL;
if (ctx->mem_size_left < size) { if (ctx->mem_size_left < size) {
struct page *page; struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) if (!page)
return NULL; return NULL;
if (ctx->vm)
ctx->vm_size_left -= PAGE_SIZE;
list_add_tail(&page->lru, &ctx->page_list); list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page); ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE; ctx->mem_size_left = PAGE_SIZE;
...@@ -155,10 +142,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx) ...@@ -155,10 +142,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
} }
ctx->mem_size_left = 0; ctx->mem_size_left = 0;
if (ctx->vm && ctx->total_mem) {
ctx->total_mem = 0;
ctx->vm_size_left = 0;
}
} }
/** /**
......
...@@ -52,10 +52,6 @@ ...@@ -52,10 +52,6 @@
* buffer objects * buffer objects
* @mem_size_left: Free memory left in the last page in @page_list * @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list * @page_address: Kernel virtual address of the last page in @page_list
* @vm: A pointer to the memory reservation interface or NULL if no
* memory reservation is needed.
* @vm_size_left: Amount of reserved memory that so far has not been allocated.
* @total_mem: Amount of reserved memory.
*/ */
struct vmw_validation_context { struct vmw_validation_context {
struct vmw_sw_context *sw_context; struct vmw_sw_context *sw_context;
...@@ -68,9 +64,6 @@ struct vmw_validation_context { ...@@ -68,9 +64,6 @@ struct vmw_validation_context {
unsigned int merge_dups; unsigned int merge_dups;
unsigned int mem_size_left; unsigned int mem_size_left;
u8 *page_address; u8 *page_address;
struct vmw_validation_mem *vm;
size_t vm_size_left;
size_t total_mem;
}; };
struct vmw_bo; struct vmw_bo;
......
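
The validation hunks above drop the reservation accounting (@vm, @vm_size_left, @total_mem and the VMWGFX_VALIDATION_MEM_GRAN granularity), leaving a plain page-at-a-time bump allocator. A simplified, self-contained sketch of that remaining pattern (generic names, an illustration under assumptions rather than the driver's exact code):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/types.h>

struct bump_ctx {
	struct list_head page_list;	/* pages owned by this context (INIT_LIST_HEAD at setup) */
	unsigned int mem_size_left;	/* bytes still free in the current page */
	u8 *page_address;		/* kernel address of the current page */
};

static void *bump_alloc(struct bump_ctx *ctx, unsigned int size)
{
	void *addr;

	size = ALIGN(size, sizeof(long));
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		/* Current page exhausted: grab a fresh zeroed page and track it. */
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	/* Carve the next chunk out of the current page. */
	addr = (void *)(ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}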