Commit 24f67d82 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'drm-fixes-2021-10-01' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:
 "Dave is out on a long w/e, should be back next week.

  Nothing nefarious, just a bunch of driver fixes: amdgpu, i915, tegra,
  and one exynos driver fix"

* tag 'drm-fixes-2021-10-01' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu: force exit gfxoff on sdma resume for rmb s0ix
  drm/amdgpu: check tiling flags when creating FB on GFX8-
  drm/amd/display: Pass PCI deviceid into DC
  drm/amd/display: initialize backlight_ramping_override to false
  drm/amdgpu: correct initial cp_hqd_quantum for gfx9
  drm/amd/display: Fix Display Flicker on embedded panels
  drm/amdgpu: fix gart.bo pin_count leak
  drm/i915: Remove warning from the rps worker
  drm/i915/request: fix early tracepoints
  drm/i915/guc, docs: Fix pdfdocs build error by removing nested grid
  gpu: host1x: Plug potential memory leak
  gpu/host1x: fence: Make spinlock static
  drm/tegra: uapi: Fix wrong mapping end address in case of disabled IOMMU
  drm/tegra: dc: Remove unused variables
  drm/exynos: Make use of the helper function devm_platform_ioremap_resource()
  drm/i915/gvt: fix the usage of ww lock in gvt scheduler.
parents 89e50359 78ea8141
...@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) ...@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
return 0; return 0;
} }
/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 tile_mode;

	/* Zero swizzle mode means linear */
	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
		return 0;

	tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);

	/* Only DISPLAY (0) and RENDER (3) micro tile modes can be scanned out */
	if (tile_mode == 0 || tile_mode == 3)
		return 0;

	drm_dbg_kms(afb->base.dev,
		    "Micro tile mode %llu not supported for scanout\n",
		    tile_mode);
	return -EINVAL;
}
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp, static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
unsigned int *width, unsigned int *height) unsigned int *width, unsigned int *height)
{ {
...@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, ...@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj) struct drm_gem_object *obj)
{ {
struct amdgpu_device *adev = drm_to_adev(dev);
int ret, i; int ret, i;
/* /*
...@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, ...@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret) if (ret)
return ret; return ret;
if (!dev->mode_config.allow_fb_modifiers) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);
if (ret)
return ret;
}
if (dev->mode_config.allow_fb_modifiers && if (dev->mode_config.allow_fb_modifiers &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) { !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
ret = convert_tiling_flags_to_modifier(rfb); ret = convert_tiling_flags_to_modifier(rfb);
......
...@@ -3599,7 +3599,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) ...@@ -3599,7 +3599,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
/* set static priority for a queue/ring */ /* set static priority for a queue/ring */
gfx_v9_0_mqd_set_priority(ring, mqd); gfx_v9_0_mqd_set_priority(ring, mqd);
mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
/* map_queues packet doesn't need activate the queue, /* map_queues packet doesn't need activate the queue,
* so only kiq need set this field. * so only kiq need set this field.
......
...@@ -1098,6 +1098,8 @@ static int gmc_v10_0_hw_fini(void *handle) ...@@ -1098,6 +1098,8 @@ static int gmc_v10_0_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v10_0_gart_disable(adev);
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */ /* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
...@@ -1106,7 +1108,6 @@ static int gmc_v10_0_hw_fini(void *handle) ...@@ -1106,7 +1108,6 @@ static int gmc_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v10_0_gart_disable(adev);
return 0; return 0;
} }
......
...@@ -1794,6 +1794,8 @@ static int gmc_v9_0_hw_fini(void *handle) ...@@ -1794,6 +1794,8 @@ static int gmc_v9_0_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_gart_disable(adev);
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */ /* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
...@@ -1802,7 +1804,6 @@ static int gmc_v9_0_hw_fini(void *handle) ...@@ -1802,7 +1804,6 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v9_0_gart_disable(adev);
return 0; return 0;
} }
......
...@@ -868,6 +868,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) ...@@ -868,6 +868,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000); msleep(1000);
} }
/* TODO: check whether can submit a doorbell request to raise
* a doorbell fence to exit gfxoff.
*/
if (adev->in_s0ix)
amdgpu_gfx_off_ctrl(adev, false);
sdma_v5_2_soft_reset(adev); sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */ /* unhalt the MEs */
sdma_v5_2_enable(adev, true); sdma_v5_2_enable(adev, true);
...@@ -876,6 +882,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) ...@@ -876,6 +882,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
/* start the gfx rings and rlc compute queues */ /* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev); r = sdma_v5_2_gfx_resume(adev);
if (adev->in_s0ix)
amdgpu_gfx_off_ctrl(adev, true);
if (r) if (r)
return r; return r;
r = sdma_v5_2_rlc_resume(adev); r = sdma_v5_2_rlc_resume(adev);
......
...@@ -1115,6 +1115,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) ...@@ -1115,6 +1115,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.pci_revision_id = adev->pdev->revision; init_data.asic_id.pci_revision_id = adev->pdev->revision;
init_data.asic_id.hw_internal_rev = adev->external_rev_id; init_data.asic_id.hw_internal_rev = adev->external_rev_id;
init_data.asic_id.chip_id = adev->pdev->device;
init_data.asic_id.vram_width = adev->gmc.vram_width; init_data.asic_id.vram_width = adev->gmc.vram_width;
/* TODO: initialize init_data.asic_id.vram_type here!!!! */ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
...@@ -1719,6 +1720,7 @@ static int dm_late_init(void *handle) ...@@ -1719,6 +1720,7 @@ static int dm_late_init(void *handle)
linear_lut[i] = 0xFFFF * i / 15; linear_lut[i] = 0xFFFF * i / 15;
params.set = 0; params.set = 0;
params.backlight_ramping_override = false;
params.backlight_ramping_start = 0xCCCC; params.backlight_ramping_start = 0xCCCC;
params.backlight_ramping_reduction = 0xCCCCCCCC; params.backlight_ramping_reduction = 0xCCCCCCCC;
params.backlight_lut_array_size = 16; params.backlight_lut_array_size = 16;
......
...@@ -1826,14 +1826,13 @@ bool perform_link_training_with_retries( ...@@ -1826,14 +1826,13 @@ bool perform_link_training_with_retries(
if (panel_mode == DP_PANEL_MODE_EDP) { if (panel_mode == DP_PANEL_MODE_EDP) {
struct cp_psp *cp_psp = &stream->ctx->cp_psp; struct cp_psp *cp_psp = &stream->ctx->cp_psp;
if (cp_psp && cp_psp->funcs.enable_assr) { if (cp_psp && cp_psp->funcs.enable_assr)
if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) { /* ASSR is bound to fail with unsigned PSP
/* since eDP implies ASSR on, change panel * verstage used during development phase.
* mode to disable ASSR * Report and continue with eDP panel mode to
*/ * perform eDP link training with right settings
panel_mode = DP_PANEL_MODE_DEFAULT; */
} cp_psp->funcs.enable_assr(cp_psp->handle, link);
}
} }
#endif #endif
......
...@@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev) ...@@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct decon_context *ctx; struct decon_context *ctx;
struct resource *res;
int ret; int ret;
int i; int i;
...@@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) ...@@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->clks[i] = clk; ctx->clks[i] = clk;
} }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ctx->addr = devm_platform_ioremap_resource(pdev, 0);
ctx->addr = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->addr)) if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr); return PTR_ERR(ctx->addr);
......
...@@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = { ...@@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = {
static int exynos_dsi_probe(struct platform_device *pdev) static int exynos_dsi_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct resource *res;
struct exynos_dsi *dsi; struct exynos_dsi *dsi;
int ret, i; int ret, i;
...@@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev) ...@@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
} }
} }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
dsi->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->reg_base)) if (IS_ERR(dsi->reg_base))
return PTR_ERR(dsi->reg_base); return PTR_ERR(dsi->reg_base);
......
...@@ -85,7 +85,6 @@ struct fimc_scaler { ...@@ -85,7 +85,6 @@ struct fimc_scaler {
/* /*
* A structure of fimc context. * A structure of fimc context.
* *
* @regs_res: register resources.
* @regs: memory mapped io registers. * @regs: memory mapped io registers.
* @lock: locking of operations. * @lock: locking of operations.
* @clocks: fimc clocks. * @clocks: fimc clocks.
...@@ -103,7 +102,6 @@ struct fimc_context { ...@@ -103,7 +102,6 @@ struct fimc_context {
struct exynos_drm_ipp_formats *formats; struct exynos_drm_ipp_formats *formats;
unsigned int num_formats; unsigned int num_formats;
struct resource *regs_res;
void __iomem *regs; void __iomem *regs;
spinlock_t lock; spinlock_t lock;
struct clk *clocks[FIMC_CLKS_MAX]; struct clk *clocks[FIMC_CLKS_MAX];
...@@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev) ...@@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev)
ctx->num_formats = num_formats; ctx->num_formats = num_formats;
/* resource memory */ /* resource memory */
ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ctx->regs = devm_platform_ioremap_resource(pdev, 0);
ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
if (IS_ERR(ctx->regs)) if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs); return PTR_ERR(ctx->regs);
......
...@@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev) ...@@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev)
return PTR_ERR(ctx->lcd_clk); return PTR_ERR(ctx->lcd_clk);
} }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ctx->regs = devm_platform_ioremap_resource(pdev, 0);
ctx->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->regs)) if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs); return PTR_ERR(ctx->regs);
......
...@@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = { ...@@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = {
static int g2d_probe(struct platform_device *pdev) static int g2d_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct resource *res;
struct g2d_data *g2d; struct g2d_data *g2d;
int ret; int ret;
...@@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev) ...@@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev)
clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); g2d->regs = devm_platform_ioremap_resource(pdev, 0);
g2d->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(g2d->regs)) { if (IS_ERR(g2d->regs)) {
ret = PTR_ERR(g2d->regs); ret = PTR_ERR(g2d->regs);
goto err_put_clk; goto err_put_clk;
......
...@@ -86,7 +86,6 @@ struct gsc_scaler { ...@@ -86,7 +86,6 @@ struct gsc_scaler {
/* /*
* A structure of gsc context. * A structure of gsc context.
* *
* @regs_res: register resources.
* @regs: memory mapped io registers. * @regs: memory mapped io registers.
* @gsc_clk: gsc gate clock. * @gsc_clk: gsc gate clock.
* @sc: scaler information. * @sc: scaler information.
...@@ -103,7 +102,6 @@ struct gsc_context { ...@@ -103,7 +102,6 @@ struct gsc_context {
struct exynos_drm_ipp_formats *formats; struct exynos_drm_ipp_formats *formats;
unsigned int num_formats; unsigned int num_formats;
struct resource *regs_res;
void __iomem *regs; void __iomem *regs;
const char **clk_names; const char **clk_names;
struct clk *clocks[GSC_MAX_CLOCKS]; struct clk *clocks[GSC_MAX_CLOCKS];
...@@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev) ...@@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev)
} }
} }
/* resource memory */ ctx->regs = devm_platform_ioremap_resource(pdev, 0);
ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
if (IS_ERR(ctx->regs)) if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs); return PTR_ERR(ctx->regs);
......
...@@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = { ...@@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = {
static int rotator_probe(struct platform_device *pdev) static int rotator_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct resource *regs_res;
struct rot_context *rot; struct rot_context *rot;
const struct rot_variant *variant; const struct rot_variant *variant;
int irq; int irq;
...@@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev) ...@@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev)
rot->formats = variant->formats; rot->formats = variant->formats;
rot->num_formats = variant->num_formats; rot->num_formats = variant->num_formats;
rot->dev = dev; rot->dev = dev;
regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rot->regs = devm_platform_ioremap_resource(pdev, 0);
rot->regs = devm_ioremap_resource(dev, regs_res);
if (IS_ERR(rot->regs)) if (IS_ERR(rot->regs))
return PTR_ERR(rot->regs); return PTR_ERR(rot->regs);
......
...@@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = { ...@@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = {
static int scaler_probe(struct platform_device *pdev) static int scaler_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct resource *regs_res;
struct scaler_context *scaler; struct scaler_context *scaler;
int irq; int irq;
int ret, i; int ret, i;
...@@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev) ...@@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev)
(struct scaler_data *)of_device_get_match_data(dev); (struct scaler_data *)of_device_get_match_data(dev);
scaler->dev = dev; scaler->dev = dev;
regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); scaler->regs = devm_platform_ioremap_resource(pdev, 0);
scaler->regs = devm_ioremap_resource(dev, regs_res);
if (IS_ERR(scaler->regs)) if (IS_ERR(scaler->regs))
return PTR_ERR(scaler->regs); return PTR_ERR(scaler->regs);
......
...@@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev) ...@@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev)
struct hdmi_audio_infoframe *audio_infoframe; struct hdmi_audio_infoframe *audio_infoframe;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct hdmi_context *hdata; struct hdmi_context *hdata;
struct resource *res;
int ret; int ret;
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
...@@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev) ...@@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev)
return ret; return ret;
} }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdata->regs = devm_platform_ioremap_resource(pdev, 0);
hdata->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hdata->regs)) { if (IS_ERR(hdata->regs)) {
ret = PTR_ERR(hdata->regs); ret = PTR_ERR(hdata->regs);
return ret; return ret;
......
...@@ -882,8 +882,6 @@ void intel_rps_park(struct intel_rps *rps) ...@@ -882,8 +882,6 @@ void intel_rps_park(struct intel_rps *rps)
if (!intel_rps_is_enabled(rps)) if (!intel_rps_is_enabled(rps))
return; return;
GEM_BUG_ON(atomic_read(&rps->num_waiters));
if (!intel_rps_clear_active(rps)) if (!intel_rps_clear_active(rps))
return; return;
......
...@@ -102,11 +102,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64); ...@@ -102,11 +102,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
* | +-------+--------------------------------------------------------------+ * | +-------+--------------------------------------------------------------+
* | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message | * | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message |
* +---+-------+--------------------------------------------------------------+ * +---+-------+--------------------------------------------------------------+
* | 1 | 31:0 | +--------------------------------------------------------+ | * | 1 | 31:0 | |
* +---+-------+ | | | * +---+-------+ |
* |...| | | Embedded `HXG Message`_ | | * |...| | [Embedded `HXG Message`_] |
* +---+-------+ | | | * +---+-------+ |
* | n | 31:0 | +--------------------------------------------------------+ | * | n | 31:0 | |
* +---+-------+--------------------------------------------------------------+ * +---+-------+--------------------------------------------------------------+
*/ */
......
...@@ -38,11 +38,11 @@ ...@@ -38,11 +38,11 @@
* +---+-------+--------------------------------------------------------------+ * +---+-------+--------------------------------------------------------------+
* | | Bits | Description | * | | Bits | Description |
* +===+=======+==============================================================+ * +===+=======+==============================================================+
* | 0 | 31:0 | +--------------------------------------------------------+ | * | 0 | 31:0 | |
* +---+-------+ | | | * +---+-------+ |
* |...| | | Embedded `HXG Message`_ | | * |...| | [Embedded `HXG Message`_] |
* +---+-------+ | | | * +---+-------+ |
* | n | 31:0 | +--------------------------------------------------------+ | * | n | 31:0 | |
* +---+-------+--------------------------------------------------------------+ * +---+-------+--------------------------------------------------------------+
*/ */
......
...@@ -576,7 +576,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) ...@@ -576,7 +576,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
/* No one is going to touch shadow bb from now on. */ /* No one is going to touch shadow bb from now on. */
i915_gem_object_flush_map(bb->obj); i915_gem_object_flush_map(bb->obj);
i915_gem_object_unlock(bb->obj); i915_gem_ww_ctx_fini(&ww);
} }
} }
return 0; return 0;
...@@ -630,7 +630,7 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) ...@@ -630,7 +630,7 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return ret; return ret;
} }
i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); i915_gem_ww_ctx_fini(&ww);
/* FIXME: we are not tracking our pinned VMA leaving it /* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon * up to the core to fix up the stray pin_count upon
......
...@@ -829,8 +829,6 @@ static void __i915_request_ctor(void *arg) ...@@ -829,8 +829,6 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->submit, submit_notify); i915_sw_fence_init(&rq->submit, submit_notify);
i915_sw_fence_init(&rq->semaphore, semaphore_notify); i915_sw_fence_init(&rq->semaphore, semaphore_notify);
dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
rq->capture_list = NULL; rq->capture_list = NULL;
init_llist_head(&rq->execute_cb); init_llist_head(&rq->execute_cb);
...@@ -905,17 +903,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) ...@@ -905,17 +903,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->ring = ce->ring; rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask; rq->execution_mask = ce->engine->mask;
kref_init(&rq->fence.refcount);
rq->fence.flags = 0;
rq->fence.error = 0;
INIT_LIST_HEAD(&rq->fence.cb_list);
ret = intel_timeline_get_seqno(tl, rq, &seqno); ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret) if (ret)
goto err_free; goto err_free;
rq->fence.context = tl->fence_context; dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
rq->fence.seqno = seqno; tl->fence_context, seqno);
RCU_INIT_POINTER(rq->timeline, tl); RCU_INIT_POINTER(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno; rq->hwsp_seqno = tl->hwsp_seqno;
......
...@@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc, ...@@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
bool prepare_bandwidth_transition) bool prepare_bandwidth_transition)
{ {
const struct tegra_plane_state *old_tegra_state, *new_tegra_state; const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
const struct tegra_dc_state *old_dc_state, *new_dc_state;
u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw; u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
const struct drm_plane_state *old_plane_state; const struct drm_plane_state *old_plane_state;
const struct drm_crtc_state *old_crtc_state; const struct drm_crtc_state *old_crtc_state;
...@@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc, ...@@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
return; return;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
old_dc_state = to_const_dc_state(old_crtc_state);
new_dc_state = to_const_dc_state(crtc->state);
if (!crtc->state->active) { if (!crtc->state->active) {
if (!old_crtc_state->active) if (!old_crtc_state->active)
......
...@@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state) ...@@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
return NULL; return NULL;
} }
static inline const struct tegra_dc_state *
to_const_dc_state(const struct drm_crtc_state *state)
{
return to_dc_state((struct drm_crtc_state *)state);
}
struct tegra_dc_stats { struct tegra_dc_stats {
unsigned long frames; unsigned long frames;
unsigned long vblank; unsigned long vblank;
......
...@@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f ...@@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
mapping->iova = sg_dma_address(mapping->sgt->sgl); mapping->iova = sg_dma_address(mapping->sgt->sgl);
} }
mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size; mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX), err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
GFP_KERNEL); GFP_KERNEL);
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#include "intr.h" #include "intr.h"
#include "syncpt.h" #include "syncpt.h"
DEFINE_SPINLOCK(lock); static DEFINE_SPINLOCK(lock);
struct host1x_syncpt_fence { struct host1x_syncpt_fence {
struct dma_fence base; struct dma_fence base;
...@@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold) ...@@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL); fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
if (!fence->waiter) if (!fence->waiter) {
kfree(fence);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
}
fence->sp = sp; fence->sp = sp;
fence->threshold = threshold; fence->threshold = threshold;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment