Commit b9eb1a61 authored by Thomas Hellstrom

drm/vmwgfx: Kill a bunch of sparse warnings

We're giving up all attempts to keep CPU and device byte ordering separate.

This silences sparse when compiled using
make C=2 CF="-D__CHECK_ENDIAN__"
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
parent f6b05004
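
Background on the warning classes silenced here: with sparse's -D__CHECK_ENDIAN__, __le32/__le64 are __bitwise types distinct from u32/u64, so unconverted assignments produce "cast to/from restricted __le32"; the remaining hunks address "symbol ... was not declared. Should it be static?", "Using plain integer as NULL pointer", and "cast removes address space" on __iomem pointers. Since the SVGA device always shares the CPU's (little-endian) byte order, the conversions buy nothing. A minimal sketch of the byte-order pattern, with hypothetical names rather than driver code:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical sketch, not driver code: the pattern sparse checks. */
struct demo_header {
	__le32 id_le;	/* old style: device byte order tracked by sparse */
	u32 id_cpu;	/* new style: plain CPU byte order, unannotated */
};

static void demo_encode(struct demo_header *h, u32 cmd)
{
	h->id_le = cpu_to_le32(cmd);	/* required; "h->id_le = cmd" warns */
	h->id_cpu = cmd;		/* no conversion, nothing to check */
}
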
@@ -35,7 +35,7 @@
 #include "svga_reg.h"
 
 typedef uint32 PPN;
-typedef __le64 PPN64;
+typedef u64 PPN64;
 
 /*
  * 3D Hardware Version
@@ -695,7 +695,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
  * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
  * become available.
  */
-int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 			   struct drm_mm_node *node,
 			   size_t size,
 			   bool interruptible)
@@ -135,9 +135,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 		return;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_dec(dev_priv);
@@ -215,9 +215,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 		return -ENOMEM;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_inc(dev_priv);
@@ -225,7 +225,7 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
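
The 0 -> NULL change above fixes sparse's "Using plain integer as NULL pointer": the final ttm_bo_reserve() parameter is a pointer (the ww_acquire_ctx ticket), not an integer. A trivial sketch with a hypothetical stand-in function:

#include <linux/stddef.h>

/* demo_reserve is hypothetical; it stands in for a function whose
 * final argument is a pointer, as with ttm_bo_reserve's ticket. */
static int demo_reserve(void *ticket)
{
	return ticket != NULL;
}

static void demo_callers(void)
{
	demo_reserve(0);	/* sparse: Using plain integer as NULL pointer */
	demo_reserve(NULL);	/* clean */
}
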
@@ -1225,7 +1225,7 @@ static void vmw_master_drop(struct drm_device *dev,
  * @dev_priv: Pointer to device private struct.
  * Needs the reservation sem to be held in non-exclusive mode.
  */
-void __vmw_svga_enable(struct vmw_private *dev_priv)
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
 {
 	spin_lock(&dev_priv->svga_lock);
 	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
@@ -1254,7 +1254,7 @@ void vmw_svga_enable(struct vmw_private *dev_priv)
  * Needs the reservation sem to be held in exclusive mode.
  * Will not empty VRAM. VRAM must be emptied by caller.
  */
-void __vmw_svga_disable(struct vmw_private *dev_priv)
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
 {
 	spin_lock(&dev_priv->svga_lock);
 	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
@@ -178,8 +178,8 @@ struct vmw_marker_queue {
 
 struct vmw_fifo_state {
 	unsigned long reserved_size;
-	__le32 *dynamic_buffer;
-	__le32 *static_buffer;
+	u32 *dynamic_buffer;
+	u32 *static_buffer;
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
@@ -405,7 +405,7 @@ struct vmw_private {
 	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	__le32 __iomem *mmio_virt;
+	u32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
@@ -1850,7 +1850,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 	uint32_t size_remaining = *size;
 	uint32_t cmd_id;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	switch (cmd_id) {
 	case SVGA_CMD_UPDATE:
 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -2066,14 +2066,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 	const struct vmw_cmd_entry *entry;
 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	/* Handle any none 3D commands */
 	if (unlikely(cmd_id < SVGA_CMD_MAX))
 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 
-	cmd_id = le32_to_cpu(header->id);
-	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+	cmd_id = header->id;
+	*size = header->size + sizeof(SVGA3dCmdHeader);
 
 	cmd_id -= SVGA_3D_CMD_BASE;
 	if (unlikely(*size > size_remaining))
@@ -2499,7 +2499,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
  * If the function is interrupted by a signal while sleeping, it will return
  * -ERESTARTSYS casted to a pointer error value.
  */
-void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
 			 void __user *user_commands,
 			 void *kernel_commands,
 			 u32 command_size,
@@ -331,7 +331,7 @@ static void vmw_deferred_io(struct fb_info *info,
 	vmw_fb_dirty_flush(par);
 };
 
-struct fb_deferred_io vmw_defio = {
+static struct fb_deferred_io vmw_defio = {
 	.delay = VMW_DIRTY_DELAY,
 	.deferred_io = vmw_deferred_io,
 };
@@ -706,7 +706,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->fix.smem_len = fb_size;
 	info->pseudo_palette = par->pseudo_palette;
-	info->screen_base = par->vmalloc;
+	info->screen_base = (char __iomem *)par->vmalloc;
 	info->screen_size = fb_size;
 	info->flags = FBINFO_DEFAULT;
@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct vmw_private *dev_priv = fman->dev_priv;
 
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;
@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 				      u32 passed_seqno)
 {
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	struct vmw_fence_obj *fence;
 
 	if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 
 	if (fence_is_signaled_locked(&fence->base))
 		return false;
@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
 	struct list_head action_list;
 	bool needs_rerun;
 	uint32_t seqno, new_seqno;
-	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
 	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
@@ -31,7 +31,7 @@
 
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
@@ -80,7 +80,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,7 +95,7 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
@@ -158,7 +158,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	static DEFINE_SPINLOCK(ping_lock);
 	unsigned long irq_flags;
@@ -176,7 +176,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -206,7 +206,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -314,7 +314,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -371,7 +371,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				if (reserveable)
 					iowrite32(bytes, fifo_mem +
 						  SVGA_FIFO_RESERVED);
-				return fifo_mem + (next_cmd >> 2);
+				return (void __force *) (fifo_mem +
+							 (next_cmd >> 2));
 			} else {
 				need_bounce = true;
 			}
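
The new (void __force *) cast is what allows the FIFO reservation path to hand callers a plain pointer: __iomem pointers live in a separate sparse address space, and converting one to an ordinary pointer draws "cast removes address space" unless the conversion is marked intentional with __force. A hedged sketch of the idiom (demo_fifo_ptr is hypothetical):

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical helper: deliberately strip the __iomem annotation.
 * Only reasonable here because the SVGA FIFO tolerates plain memory
 * accesses, which is exactly what the surrounding code now assumes. */
static void *demo_fifo_ptr(u32 __iomem *fifo_mem, u32 next_cmd)
{
	return (void __force *)(fifo_mem + (next_cmd >> 2));
}
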
@@ -414,7 +415,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 }
 
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      __le32 __iomem *fifo_mem,
+			      u32 __iomem *fifo_mem,
 			      uint32_t next_cmd,
 			      uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -436,7 +437,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       __le32 __iomem *fifo_mem,
+			       u32 __iomem *fifo_mem,
 			       uint32_t next_cmd,
 			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -455,10 +456,10 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 	}
 }
 
-void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -545,9 +546,9 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
-	void *fm;
+	u32 *fm;
 	int ret = 0;
-	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
@@ -573,11 +574,9 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 		return 0;
 	}
 
-	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
-	cmd_fence = (struct svga_fifo_cmd_fence *)
-		((unsigned long)fm + sizeof(__le32));
-
-	iowrite32(*seqno, &cmd_fence->fence);
+	*fm++ = SVGA_CMD_FENCE;
+	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+	cmd_fence->fence = *seqno;
 	vmw_fifo_commit_flush(dev_priv, bytes);
 	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
 	vmw_update_seqno(dev_priv, fifo_state);
@@ -63,7 +63,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -158,7 +158,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_get_3d_cap_arg *) data;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t size;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
 	void *bounce;
 	int ret;
@@ -239,7 +239,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 	if (unlikely(num_clips == 0))
 		return 0;
@@ -322,7 +322,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 	if (unlikely(num_clips == 0))
 		return 0;
@@ -72,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	if (dev_priv->last_read_seqno != seqno) {
@@ -178,7 +178,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
 	if (ret == 0 && fifo_idle) {
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
 	}
 	wake_up_all(&dev_priv->fence_queue);
@@ -71,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
 	memcpy(&cmd[1], image, image_size);
 
-	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
-	cmd->cursor.id = cpu_to_le32(0);
-	cmd->cursor.width = cpu_to_le32(width);
-	cmd->cursor.height = cpu_to_le32(height);
-	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
-	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+	cmd->cursor.id = 0;
+	cmd->cursor.width = width;
+	cmd->cursor.height = height;
+	cmd->cursor.hotspotX = hotspotX;
+	cmd->cursor.hotspotY = hotspotY;
 
 	vmw_fifo_commit(dev_priv, cmd_size);
@@ -123,7 +123,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t count;
 
 	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -1017,7 +1017,7 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
 };
 
-int vmw_kms_generic_present(struct vmw_private *dev_priv,
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
 			    struct drm_file *file_priv,
 			    struct vmw_framebuffer *vfb,
 			    struct vmw_surface *surface,
@@ -1785,7 +1785,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 
-	ttm_bo_reserve(bo, false, false, interruptible, 0);
+	ttm_bo_reserve(bo, false, false, interruptible, NULL);
 	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
 					 validate_as_mob);
 	if (ret)
@@ -476,11 +476,11 @@ int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
 	memset(cmd, 0, fifo_size);
 	for (i = 0; i < num_clips; i++, clips += increment) {
-		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
-		cmd[i].body.x = cpu_to_le32(clips->x1);
-		cmd[i].body.y = cpu_to_le32(clips->y1);
-		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
-		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
+		cmd[i].header = SVGA_CMD_UPDATE;
+		cmd[i].body.x = clips->x1;
+		cmd[i].body.y = clips->y1;
+		cmd[i].body.width = clips->x2 - clips->x1;
+		cmd[i].body.height = clips->y2 - clips->y1;
 	}
 
 	vmw_fifo_commit(dev_priv, fifo_size);
@@ -142,7 +142,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.type = type;
-	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = otable->size;
 	cmd->body.validSizeInBytes = 0;
 	cmd->body.ptDepth = mob->pt_level;
@@ -430,15 +430,15 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
  * *@addr according to the page table entry size.
  */
 #if (VMW_PPN_SIZE == 8)
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+	*((u64 *) *addr) = val >> PAGE_SHIFT;
 	*addr += 2;
 }
 #else
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+	*(*addr)++ = val >> PAGE_SHIFT;
 }
 #endif
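
With the __le annotations gone, page-table pages are written as a plain u32 stream; the 64-bit variant above stores one u64 entry and advances two u32 slots per call. A hypothetical usage sketch built on the vmw_mob_assign_ppn() shown above:

/* Hypothetical caller, not driver code: fill one mapped page-table
 * page with page numbers derived from DMA addresses. */
static void demo_fill_pt(u32 *pt_page, const dma_addr_t *pages,
			 unsigned long num)
{
	u32 *addr = pt_page;
	unsigned long i;

	for (i = 0; i < num; ++i)
		vmw_mob_assign_ppn(&addr, pages[i]); /* advances 1 or 2 slots */
}
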
@@ -460,7 +460,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
 	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
 	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
 	unsigned long pt_page;
-	__le32 *addr, *save_addr;
+	u32 *addr, *save_addr;
 	unsigned long i;
 	struct page *page;
@@ -641,7 +641,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.mobid = mob_id;
 	cmd->body.ptDepth = mob->pt_level;
-	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -39,12 +39,12 @@
 #define VMWGFX_IRQSTATUS_PORT 0x8
 
 struct svga_guest_mem_descriptor {
-	__le32 ppn;
-	__le32 num_pages;
+	u32 ppn;
+	u32 num_pages;
 };
 
 struct svga_fifo_cmd_fence {
-	__le32 fence;
+	u32 fence;
 };
 
 #define SVGA_SYNC_GENERIC 1
@@ -121,6 +121,7 @@ static void vmw_resource_release(struct kref *kref)
 	int id;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
+	write_lock(&dev_priv->resource_lock);
 	res->avail = false;
 	list_del_init(&res->lru_head);
 	write_unlock(&dev_priv->resource_lock);
@@ -156,20 +157,17 @@ static void vmw_resource_release(struct kref *kref)
 		kfree(res);
 
-	write_lock(&dev_priv->resource_lock);
 	if (id != -1)
 		idr_remove(idr, id);
+	write_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
 {
 	struct vmw_resource *res = *p_res;
-	struct vmw_private *dev_priv = res->dev_priv;
 
 	*p_res = NULL;
-	write_lock(&dev_priv->resource_lock);
 	kref_put(&res->kref, vmw_resource_release);
-	write_unlock(&dev_priv->resource_lock);
 }
@@ -260,17 +258,16 @@ void vmw_resource_activate(struct vmw_resource *res,
 	write_unlock(&dev_priv->resource_lock);
 }
 
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 					 struct idr *idr, int id)
 {
 	struct vmw_resource *res;
 
 	read_lock(&dev_priv->resource_lock);
 	res = idr_find(idr, id);
-	if (res && res->avail)
-		kref_get(&res->kref);
-	else
+	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 		res = NULL;
 	read_unlock(&dev_priv->resource_lock);
 
 	if (unlikely(res == NULL))
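
This hunk pairs with the locking change in vmw_resource_release() above: release now takes resource_lock itself rather than running under the caller's lock, so a lookup can race with a resource whose refcount has already dropped to zero. kref_get_unless_zero() declines to resurrect such an object. A self-contained sketch of the pattern, with a hypothetical demo_res type:

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/spinlock.h>

struct demo_res {
	struct kref kref;
	bool avail;
};

static struct demo_res *demo_lookup(struct idr *idr, rwlock_t *lock, int id)
{
	struct demo_res *res;

	read_lock(lock);
	res = idr_find(idr, id);
	/* A zero refcount means release is in flight; treat as absent. */
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;
	read_unlock(lock);

	return res;
}
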
@@ -1306,7 +1303,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
  * @res:            The resource to evict.
  * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -450,7 +450,7 @@ static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
  * Update the implicit fb to the current fb of this crtc.
  * Must be called with the mode_config mutex held.
  */
-void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
+static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
 				struct drm_crtc *crtc)
 {
 	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
@@ -407,7 +407,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 }
 
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 				      struct vmw_dma_buffer *buffer,
 				      size_t shader_size,
 				      size_t offset,
@@ -220,7 +220,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
 	cmd->header.size = cmd_len;
 	cmd->body.sid = srf->res.id;
 	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.format = srf->format;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
 		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -1054,7 +1054,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 	cmd->header.size = cmd_len;
 	cmd->body.sid = srf->res.id;
 	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.format = srf->format;
 	cmd->body.numMipLevels = srf->mip_levels[0];
 	cmd->body.multisampleCount = srf->multisample_count;
 	cmd->body.autogenFilter = srf->autogen_filter;