Commit 10a6e5fe authored by Linus Torvalds

Merge tag 'drm-fixes-2023-10-13' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Weekly fixes, the core is msm and amdgpu with some scattered fixes
  across vmwgfx, panel and the core stuff.

  atomic-helper:
   - Relax checks for unregistered connectors

  dma-buf:
   - Work around race condition when retrieving fence timestamp

  gem:
   - Avoid OOB access in BO memory range

  panel:
   - boe-tv101wum-nl6: Fix flickering

  simpledrm:
   - Fix error output

  vmwgfx:
   - Fix size calculation in texture-state code
   - Ref GEM BOs in surfaces

  msm:
   - PHY/link training reset fix
   - msm8998 - correct highest bank bit
   - skip video mode if timing engine disabled
   - check irq_of_parse_and_map return code
   - add new lines to some prints
   - fail atomic check for max mdp clk test

  amdgpu:
   - Seamless boot fix
   - Fix TTM BO resource check
   - SI fix for doorbell handling"

* tag 'drm-fixes-2023-10-13' of git://anongit.freedesktop.org/drm/drm:
  drm/tiny: correctly print `struct resource *` on error
  drm: Do not overrun array in drm_gem_get_pages()
  drm/atomic-helper: relax unregistered connector check
  drm/panel: boe-tv101wum-nl6: Completely pull GPW to VGL before TP term
  drm/amdgpu: fix SI failure due to doorbells allocation
  drm/amdgpu: add missing NULL check
  drm/amd/display: Don't set dpms_off for seamless boot
  drm/vmwgfx: Keep a gem reference to user bos in surfaces
  drm/vmwgfx: fix typo of sizeof argument
  drm/msm/dpu: fail dpu_plane_atomic_check() based on mdp clk limits
  dma-buf: add dma_fence_timestamp helper
  drm/msm/dp: Add newlines to debug printks
  drm/msm/dpu: change _dpu_plane_calc_bw() to use u64 to avoid overflow
  drm/msm/dsi: fix irq_of_parse_and_map() error checking
  drm/msm/dsi: skip the wait for video mode done if not applicable
  drm/msm/mdss: fix highest-bank-bit for msm8998
  drm/msm/dp: do not reinitialize phy unless retry during link training
parents ce583d5f 30873697
@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
         dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
             if (!dma_fence_is_signaled(tmp)) {
                 ++count;
-            } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
-                        &tmp->flags)) {
-                if (ktime_after(tmp->timestamp, timestamp))
-                    timestamp = tmp->timestamp;
             } else {
-                /*
-                 * Use the current time if the fence is
-                 * currently signaling.
-                 */
-                timestamp = ktime_get();
+                ktime_t t = dma_fence_timestamp(tmp);
+
+                if (ktime_after(t, timestamp))
+                    timestamp = t;
             }
         }
     }
......
@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
         sizeof(info->driver_name));
 
     info->status = dma_fence_get_status(fence);
-    while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
-           !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
-        cpu_relax();
     info->timestamp_ns =
-        test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
-        ktime_to_ns(fence->timestamp) :
-        ktime_set(0, 0);
+        dma_fence_is_signaled(fence) ?
+        ktime_to_ns(dma_fence_timestamp(fence)) :
+        ktime_set(0, 0);
 
     return info->status;
 }
......
@@ -142,6 +142,10 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
     int r;
     int size;
 
+    /* SI HW does not have doorbells, skip allocation */
+    if (adev->doorbell.num_kernel_doorbells == 0)
+        return 0;
+
     /* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */
     size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
......
@@ -252,7 +252,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
     struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     struct amdgpu_res_cursor cursor;
 
-    if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
+    if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
         return false;
 
     amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
......
@@ -1262,6 +1262,9 @@ static void disable_vbios_mode_if_required(
         if (stream == NULL)
             continue;
 
+        if (stream->apply_seamless_boot_optimization)
+            continue;
+
         // only looking for first odm pipe
         if (pipe->prev_odm_pipe)
             continue;
......
@@ -290,7 +290,8 @@ static int
 update_connector_routing(struct drm_atomic_state *state,
              struct drm_connector *connector,
              struct drm_connector_state *old_connector_state,
-             struct drm_connector_state *new_connector_state)
+             struct drm_connector_state *new_connector_state,
+             bool added_by_user)
 {
     const struct drm_connector_helper_funcs *funcs;
     struct drm_encoder *new_encoder;
@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
      * there's a chance the connector may have been destroyed during the
      * process, but it's better to ignore that then cause
      * drm_atomic_helper_resume() to fail.
+     *
+     * Last, we want to ignore connector registration when the connector
+     * was not pulled in the atomic state by user-space (ie, was pulled
+     * in by the driver, e.g. when updating a DP-MST stream).
      */
     if (!state->duplicated && drm_connector_is_unregistered(connector) &&
-        crtc_state->active) {
+        added_by_user && crtc_state->active) {
         drm_dbg_atomic(connector->dev,
                    "[CONNECTOR:%d:%s] is not registered\n",
                    connector->base.id, connector->name);
@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
     struct drm_connector *connector;
     struct drm_connector_state *old_connector_state, *new_connector_state;
     int i, ret;
-    unsigned int connectors_mask = 0;
+    unsigned int connectors_mask = 0, user_connectors_mask = 0;
+
+    for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
+        user_connectors_mask |= BIT(i);
 
     for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
         bool has_connectors =
@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
          */
         ret = update_connector_routing(state, connector,
                            old_connector_state,
-                           new_connector_state);
+                           new_connector_state,
+                           BIT(i) & user_connectors_mask);
         if (ret)
             return ret;
         if (old_connector_state->crtc) {
......
@@ -540,7 +540,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
     struct page **pages;
     struct folio *folio;
     struct folio_batch fbatch;
-    int i, j, npages;
+    long i, j, npages;
 
     if (WARN_ON(!obj->filp))
         return ERR_PTR(-EINVAL);
@@ -564,11 +564,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
     i = 0;
     while (i < npages) {
+        long nr;
+
         folio = shmem_read_folio_gfp(mapping, i,
                 mapping_gfp_mask(mapping));
         if (IS_ERR(folio))
             goto fail;
-        for (j = 0; j < folio_nr_pages(folio); j++, i++)
+        nr = min(npages - i, folio_nr_pages(folio));
+        for (j = 0; j < nr; j++, i++)
             pages[i] = folio_file_page(folio, i);
 
         /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
......
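The clamp added above matters because a folio can span more pages than remain in the requested range; without it, the copy loop indexes past the end of the pages[] array. A minimal userspace sketch of the same pattern, with the folio size and array length as made-up stand-ins:

#include <stdio.h>

#define NPAGES      10  /* length of the destination array */
#define FOLIO_PAGES 8   /* pages per hypothetical folio */

int main(void)
{
    int pages[NPAGES];
    long i = 0;

    while (i < NPAGES) {
        /* clamp: without this, j would run to FOLIO_PAGES and the
         * last "folio" would index pages[] out of bounds */
        long nr = (NPAGES - i < FOLIO_PAGES) ? NPAGES - i : FOLIO_PAGES;

        for (long j = 0; j < nr; j++, i++)
            pages[i] = (int)i;
    }
    printf("filled %ld entries, last = %d\n", i, pages[NPAGES - 1]);
    return 0;
}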
@@ -119,6 +119,7 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
     struct dpu_sw_pipe_cfg *pipe_cfg)
 {
     int src_width, src_height, dst_height, fps;
+    u64 plane_pixel_rate, plane_bit_rate;
     u64 plane_prefill_bw;
     u64 plane_bw;
     u32 hw_latency_lines;
@@ -136,13 +137,12 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
     scale_factor = src_height > dst_height ?
         mult_frac(src_height, 1, dst_height) : 1;
 
-    plane_bw =
-        src_width * mode->vtotal * fps * fmt->bpp *
-        scale_factor;
+    plane_pixel_rate = src_width * mode->vtotal * fps;
+    plane_bit_rate = plane_pixel_rate * fmt->bpp;
 
-    plane_prefill_bw =
-        src_width * hw_latency_lines * fps * fmt->bpp *
-        scale_factor * mode->vtotal;
+    plane_bw = plane_bit_rate * scale_factor;
+
+    plane_prefill_bw = plane_bw * hw_latency_lines;
 
     if ((vbp+vpw) > hw_latency_lines)
         do_div(plane_prefill_bw, (vbp+vpw));
@@ -733,9 +733,11 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
 
 static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
         struct dpu_sw_pipe *pipe,
         struct dpu_sw_pipe_cfg *pipe_cfg,
-        const struct dpu_format *fmt)
+        const struct dpu_format *fmt,
+        const struct drm_display_mode *mode)
 {
     uint32_t min_src_size;
+    struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
 
     min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -774,6 +776,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
         return -EINVAL;
     }
 
+    /* max clk check */
+    if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
+        DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
+        return -E2BIG;
+    }
+
     return 0;
 }
@@ -899,12 +907,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
         r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
     }
 
-    ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt);
+    ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
     if (ret)
         return ret;
 
     if (r_pipe->sspp) {
-        ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt);
+        ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
+                          &crtc_state->adjusted_mode);
         if (ret)
             return ret;
     }
......
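The _dpu_plane_calc_bw() rework is an overflow fix: the old expression multiplied src_width, vtotal, fps, bpp and scale_factor in 32-bit int arithmetic before the result was ever widened. A self-contained sketch of why the widening has to happen before the multiply (display numbers chosen for illustration only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* plausible display parameters, illustrative only */
    uint32_t src_width = 4096, vtotal = 2250, fps = 120, bpp = 32;

    /* 32-bit arithmetic: 4096 * 2250 * 120 * 32 is about 3.5e10,
     * which wraps modulo 2^32 and yields a uselessly small value */
    uint32_t wrapped = src_width * vtotal * fps * bpp;

    /* widen first, as the fix does with u64 plane_pixel_rate and
     * plane_bit_rate, so no intermediate product can wrap */
    uint64_t pixel_rate = (uint64_t)src_width * vtotal * fps;
    uint64_t bit_rate = pixel_rate * bpp;

    printf("32-bit (wrapped): %u\n", wrapped);
    printf("64-bit (correct): %llu\n", (unsigned long long)bit_rate);
    return 0;
}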
@@ -1774,13 +1774,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
         return rc;
 
     while (--link_train_max_retries) {
-        rc = dp_ctrl_reinitialize_mainlink(ctrl);
-        if (rc) {
-            DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
-                    rc);
-            break;
-        }
-
         training_step = DP_TRAINING_NONE;
         rc = dp_ctrl_setup_main_link(ctrl, &training_step);
         if (rc == 0) {
@@ -1832,6 +1825,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
             /* stop link training before start re training */
             dp_ctrl_clear_training_pattern(ctrl);
         }
+
+        rc = dp_ctrl_reinitialize_mainlink(ctrl);
+        if (rc) {
+            DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
+            break;
+        }
     }
 
     if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
......
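The dp_ctrl change moves the mainlink/PHY reinitialization from the top of the retry loop to the bottom, so the first training attempt uses the PHY as already configured and a reset happens only when a retry is actually needed. A toy skeleton of the reshaped control flow (stand-in functions, not the msm API):

#include <stdio.h>

/* toy stand-ins: training fails twice, then succeeds */
static int setup_main_link(int attempt) { return attempt < 2 ? -1 : 0; }
static int reinitialize_mainlink(void) { return 0; }

int main(void)
{
    int retries = 5, attempt = 0, rc = -1;

    while (--retries) {
        rc = setup_main_link(attempt++);
        if (rc == 0)
            break;          /* trained, no PHY reset needed */

        /* only on failure: reset the PHY before retrying */
        rc = reinitialize_mainlink();
        if (rc) {
            fprintf(stderr, "reinit failed\n");
            break;
        }
    }
    printf("link training %s\n", rc == 0 ? "succeeded" : "failed");
    return 0;
}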
@@ -1090,7 +1090,7 @@ int dp_link_process_request(struct dp_link *dp_link)
     } else if (dp_link_read_psr_error_status(link)) {
         DRM_ERROR("PSR IRQ_HPD received\n");
     } else if (dp_link_psr_capability_changed(link)) {
-        drm_dbg_dp(link->drm_dev, "PSR Capability changed");
+        drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
     } else {
         ret = dp_link_process_link_status_update(link);
         if (!ret) {
@@ -1107,7 +1107,7 @@ int dp_link_process_request(struct dp_link *dp_link)
         }
     }
 
-    drm_dbg_dp(link->drm_dev, "sink request=%#x",
+    drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
            dp_link->sink_request);
     return ret;
 }
......
@@ -1082,9 +1082,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
 
 static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
 {
+    u32 data;
+
     if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
         return;
 
+    data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+    /* if video mode engine is not busy, its because
+     * either timing engine was not turned on or the
+     * DSI controller has finished transmitting the video
+     * data already, so no need to wait in those cases
+     */
+    if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+        return;
+
     if (msm_host->power_on && msm_host->enabled) {
         dsi_wait4video_done(msm_host);
         /* delay 4 ms to skip BLLP */
@@ -1894,10 +1906,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
     }
 
     msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-    if (msm_host->irq < 0) {
-        ret = msm_host->irq;
-        dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
-        return ret;
+    if (!msm_host->irq) {
+        dev_err(&pdev->dev, "failed to get irq\n");
+        return -EINVAL;
     }
 
     /* do not autoenable, will be enabled later */
......
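The irq fix works because irq_of_parse_and_map() returns an unsigned int, with 0 meaning failure; the old `irq < 0` test could never fire. A hedged userspace sketch of the corrected check, with a fake mapping function standing in for the OF call:

#include <stdio.h>

/* stand-in for irq_of_parse_and_map(): returns 0 on failure,
 * a positive virq number on success, never a negative value */
static unsigned int fake_parse_and_map(int fail)
{
    return fail ? 0 : 42;
}

static int probe(int fail)
{
    unsigned int irq = fake_parse_and_map(fail);

    if (!irq) {             /* correct: 0 means "no mapping" */
        fprintf(stderr, "failed to get irq\n");
        return -22;         /* -EINVAL */
    }
    /* an 'irq < 0' test here would be dead code on an unsigned value */
    printf("got irq %u\n", irq);
    return 0;
}

int main(void)
{
    probe(0);
    probe(1);
    return 0;
}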
@@ -511,7 +511,7 @@ static int mdss_remove(struct platform_device *pdev)
 static const struct msm_mdss_data msm8998_data = {
     .ubwc_enc_version = UBWC_1_0,
     .ubwc_dec_version = UBWC_1_0,
-    .highest_bank_bit = 1,
+    .highest_bank_bit = 2,
 };
 
 static const struct msm_mdss_data qcm2290_data = {
......
@@ -1342,9 +1342,7 @@ static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = {
     _INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11),
     _INIT_DCS_CMD(0xCB, 0x86),
     _INIT_DCS_CMD(0xD2, 0x3C, 0xFA),
-    _INIT_DCS_CMD(0xE9, 0xC5),
-    _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
-    _INIT_DCS_CMD(0xE9, 0x3F),
+    _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
     _INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40),
     _INIT_DCS_CMD(0xBD, 0x02),
     _INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0),
......
@@ -929,7 +929,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
         if (next) {
             next->s_fence->scheduled.timestamp =
-                job->s_fence->finished.timestamp;
+                dma_fence_timestamp(&job->s_fence->finished);
             /* start TO timer for next job */
             drm_sched_start_timeout(sched);
         }
......
@@ -745,7 +745,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
 
     ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
     if (ret) {
-        drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
+        drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
         return ERR_PTR(ret);
     }
 
......
@@ -34,6 +34,8 @@
 
 static void vmw_bo_release(struct vmw_bo *vbo)
 {
+    WARN_ON(vbo->tbo.base.funcs &&
+        kref_read(&vbo->tbo.base.refcount) != 0);
     vmw_bo_unmap(vbo);
     drm_gem_object_release(&vbo->tbo.base);
 }
@@ -497,7 +499,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
         if (!(flags & drm_vmw_synccpu_allow_cs)) {
             atomic_dec(&vmw_bo->cpu_writers);
         }
-        vmw_user_bo_unref(vmw_bo);
+        vmw_user_bo_unref(&vmw_bo);
     }
 
     return ret;
@@ -539,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
             return ret;
 
     ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
-    vmw_user_bo_unref(vbo);
+    vmw_user_bo_unref(&vbo);
     if (unlikely(ret != 0)) {
         if (ret == -ERESTARTSYS || ret == -EBUSY)
             return -EBUSY;
@@ -612,7 +614,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
     }
 
     *out = to_vmw_bo(gobj);
-    ttm_bo_get(&(*out)->tbo);
 
     return 0;
 }
......
@@ -195,12 +195,19 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
     return buf;
 }
 
-static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
+static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
 {
-    if (vbo) {
-        ttm_bo_put(&vbo->tbo);
-        drm_gem_object_put(&vbo->tbo.base);
-    }
+    drm_gem_object_get(&vbo->tbo.base);
+    return vbo;
+}
+
+static inline void vmw_user_bo_unref(struct vmw_bo **buf)
+{
+    struct vmw_bo *tmp_buf = *buf;
+
+    *buf = NULL;
+    if (tmp_buf)
+        drm_gem_object_put(&tmp_buf->tbo.base);
 }
 
 static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
......
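The new vmw_user_bo_unref() takes a pointer-to-pointer so it can NULL the caller's reference as a side effect, turning an accidental second unref into a no-op instead of a double put. A minimal sketch of the idiom with a toy refcounted object (hypothetical names, not the vmwgfx or TTM types):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; };

static void obj_put(struct obj *o)
{
    if (--o->refcount == 0) {
        printf("freeing object\n");
        free(o);
    }
}

/* unref-and-clear: after the call the caller's pointer is NULL,
 * so a second unref through the same variable does nothing */
static void obj_unref(struct obj **p)
{
    struct obj *tmp = *p;

    *p = NULL;
    if (tmp)
        obj_put(tmp);
}

int main(void)
{
    struct obj *o = malloc(sizeof(*o));

    o->refcount = 1;
    obj_unref(&o);
    obj_unref(&o);  /* safe: o is already NULL */
    return 0;
}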
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
      * for the new COTable. Initially pin the buffer object to make sure
      * we can use tryreserve without failure.
      */
-    ret = vmw_bo_create(dev_priv, &bo_params, &buf);
+    ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
     if (ret) {
         DRM_ERROR("Failed initializing new cotable MOB.\n");
         goto out_done;
@@ -502,7 +502,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
     vmw_resource_mob_attach(res);
 
     /* Let go of the old mob. */
-    vmw_bo_unreference(&old_buf);
+    vmw_user_bo_unref(&old_buf);
     res->id = vcotbl->type;
 
     ret = dma_resv_reserve_fences(bo->base.resv, 1);
@@ -521,7 +521,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 out_wait:
     ttm_bo_unpin(bo);
     ttm_bo_unreserve(bo);
-    vmw_bo_unreference(&buf);
+    vmw_user_bo_unref(&buf);
 
 out_done:
     MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
......
@@ -853,6 +853,10 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 /**
  * GEM related functionality - vmwgfx_gem.c
  */
+struct vmw_bo_params;
+int vmw_gem_object_create(struct vmw_private *vmw,
+              struct vmw_bo_params *params,
+              struct vmw_bo **p_vbo);
 extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
                          struct drm_file *filp,
                          uint32_t size,
......
@@ -1151,7 +1151,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                  SVGAMobId *id,
                  struct vmw_bo **vmw_bo_p)
 {
-    struct vmw_bo *vmw_bo;
+    struct vmw_bo *vmw_bo, *tmp_bo;
     uint32_t handle = *id;
     struct vmw_relocation *reloc;
     int ret;
@@ -1164,7 +1164,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
     }
 
     vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
     ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-    vmw_user_bo_unref(vmw_bo);
+    tmp_bo = vmw_bo;
+    vmw_user_bo_unref(&tmp_bo);
     if (unlikely(ret != 0))
         return ret;
@@ -1206,7 +1207,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                    SVGAGuestPtr *ptr,
                    struct vmw_bo **vmw_bo_p)
 {
-    struct vmw_bo *vmw_bo;
+    struct vmw_bo *vmw_bo, *tmp_bo;
     uint32_t handle = ptr->gmrId;
     struct vmw_relocation *reloc;
     int ret;
@@ -1220,7 +1221,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 
     vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
                  VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
     ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-    vmw_user_bo_unref(vmw_bo);
+    tmp_bo = vmw_bo;
+    vmw_user_bo_unref(&tmp_bo);
     if (unlikely(ret != 0))
         return ret;
@@ -1619,7 +1621,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 {
     VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
     SVGA3dTextureState *last_state = (SVGA3dTextureState *)
-        ((unsigned long) header + header->size + sizeof(header));
+        ((unsigned long) header + header->size + sizeof(*header));
     SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
         ((unsigned long) header + sizeof(*cmd));
     struct vmw_resource *ctx;
......
@@ -111,6 +111,20 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
     .vm_ops = &vmw_vm_ops,
 };
 
+int vmw_gem_object_create(struct vmw_private *vmw,
+              struct vmw_bo_params *params,
+              struct vmw_bo **p_vbo)
+{
+    int ret = vmw_bo_create(vmw, params, p_vbo);
+
+    if (ret != 0)
+        goto out_no_bo;
+
+    (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
+out_no_bo:
+    return ret;
+}
+
 int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
                       struct drm_file *filp,
                       uint32_t size,
@@ -126,12 +140,10 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
         .pin = false
     };
 
-    ret = vmw_bo_create(dev_priv, &params, p_vbo);
+    ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
     if (ret != 0)
         goto out_no_bo;
 
-    (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-
     ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
 out_no_bo:
     return ret;
......
@@ -1471,8 +1471,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
     /* Reserve and switch the backing mob. */
     mutex_lock(&res->dev_priv->cmdbuf_mutex);
     (void) vmw_resource_reserve(res, false, true);
-    vmw_bo_unreference(&res->guest_memory_bo);
-    res->guest_memory_bo = vmw_bo_reference(bo_mob);
+    vmw_user_bo_unref(&res->guest_memory_bo);
+    res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
     res->guest_memory_offset = 0;
     vmw_resource_unreserve(res, false, false, false, NULL, 0);
     mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1666,7 +1666,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 
 err_out:
     /* vmw_user_lookup_handle takes one ref so does new_fb */
     if (bo)
-        vmw_user_bo_unref(bo);
+        vmw_user_bo_unref(&bo);
     if (surface)
         vmw_surface_unreference(&surface);
......
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 
     ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
-    vmw_user_bo_unref(buf);
+    vmw_user_bo_unref(&buf);
 
 out_unlock:
     mutex_unlock(&overlay->mutex);
......
@@ -141,7 +141,7 @@ static void vmw_resource_release(struct kref *kref)
         if (res->coherent)
             vmw_bo_dirty_release(res->guest_memory_bo);
         ttm_bo_unreserve(bo);
-        vmw_bo_unreference(&res->guest_memory_bo);
+        vmw_user_bo_unref(&res->guest_memory_bo);
     }
 
     if (likely(res->hw_destroy != NULL)) {
@@ -338,7 +338,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
         return 0;
     }
 
-    ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
+    ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
     if (unlikely(ret != 0))
         goto out_no_bo;
 
@@ -457,11 +457,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
             vmw_resource_mob_detach(res);
             if (res->coherent)
                 vmw_bo_dirty_release(res->guest_memory_bo);
-            vmw_bo_unreference(&res->guest_memory_bo);
+            vmw_user_bo_unref(&res->guest_memory_bo);
         }
 
         if (new_guest_memory_bo) {
-            res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
+            res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 
             /*
              * The validation code should already have added a
@@ -551,7 +551,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
     ttm_bo_put(val_buf->bo);
     val_buf->bo = NULL;
     if (guest_memory_dirty)
-        vmw_bo_unreference(&res->guest_memory_bo);
+        vmw_user_bo_unref(&res->guest_memory_bo);
 
     return ret;
 }
@@ -727,7 +727,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
         goto out_no_validate;
     else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
         WARN_ON_ONCE(vmw_resource_mob_attached(res));
-        vmw_bo_unreference(&res->guest_memory_bo);
+        vmw_user_bo_unref(&res->guest_memory_bo);
     }
 
     return 0;
......
@@ -180,7 +180,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 
     res->guest_memory_size = size;
     if (byte_code) {
-        res->guest_memory_bo = vmw_bo_reference(byte_code);
+        res->guest_memory_bo = vmw_user_bo_ref(byte_code);
         res->guest_memory_offset = offset;
     }
     shader->size = size;
@@ -809,7 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
                  shader_type, num_input_sig,
                  num_output_sig, tfile, shader_handle);
 
 out_bad_arg:
-    vmw_user_bo_unref(buffer);
+    vmw_user_bo_unref(&buffer);
     return ret;
 }
......
@@ -686,9 +686,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
         container_of(base, struct vmw_user_surface, prime.base);
     struct vmw_resource *res = &user_srf->srf.res;
 
-    if (res->guest_memory_bo)
-        drm_gem_object_put(&res->guest_memory_bo->tbo.base);
-
     *p_base = NULL;
     vmw_resource_unreference(&res);
 }
@@ -855,23 +852,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
      * expect a backup buffer to be present.
      */
     if (dev_priv->has_mob && req->shareable) {
-        uint32_t backup_handle;
-
-        ret = vmw_gem_object_create_with_handle(dev_priv,
-                            file_priv,
-                            res->guest_memory_size,
-                            &backup_handle,
-                            &res->guest_memory_bo);
+        struct vmw_bo_params params = {
+            .domain = VMW_BO_DOMAIN_SYS,
+            .busy_domain = VMW_BO_DOMAIN_SYS,
+            .bo_type = ttm_bo_type_device,
+            .size = res->guest_memory_size,
+            .pin = false
+        };
+
+        ret = vmw_gem_object_create(dev_priv,
+                        &params,
+                        &res->guest_memory_bo);
         if (unlikely(ret != 0)) {
             vmw_resource_unreference(&res);
             goto out_unlock;
         }
-        vmw_bo_reference(res->guest_memory_bo);
-        /*
-         * We don't expose the handle to the userspace and surface
-         * already holds a gem reference
-         */
-        drm_gem_handle_delete(file_priv, backup_handle);
     }
 
     tmp = vmw_resource_reference(&srf->res);
@@ -1512,7 +1507,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
     if (ret == 0) {
         if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
             VMW_DEBUG_USER("Surface backup buffer too small.\n");
-            vmw_bo_unreference(&res->guest_memory_bo);
+            vmw_user_bo_unref(&res->guest_memory_bo);
             ret = -EINVAL;
             goto out_unlock;
         } else {
@@ -1526,8 +1521,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                         res->guest_memory_size,
                         &backup_handle,
                         &res->guest_memory_bo);
-        if (ret == 0)
-            vmw_bo_reference(res->guest_memory_bo);
     }
 
     if (unlikely(ret != 0)) {
......
@@ -568,6 +568,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
     fence->error = error;
 }
 
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+    if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+        return ktime_get();
+
+    while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+        cpu_relax();
+
+    return fence->timestamp;
+}
+
 signed long dma_fence_wait_timeout(struct dma_fence *,
                    bool intr, signed long timeout);
 signed long dma_fence_wait_any_timeout(struct dma_fence **fences,