Commit 1d428714 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-07-03' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Pretty usual rc4 pull: two usual amdgpu, i915 pulls, and some misc arm
  driver fixes.

  The bigger bit is the inclusion of the asm sources for some GPU shaders
   that were contained in the i915 driver; otherwise it's pretty much
   business as usual.

  dma-buf:
   - fix a use-after-free bug

  amdgpu:
   - Fix for vega20 boards without RAS support
   - DC bandwidth revalidation fix
   - Fix Renoir vram info fetching
   - Fix hwmon freq printing

  i915:
   - GVT fixes
      - Two missed MMIO handler fixes for SKL/CFL
      - Fix mask register bits check
      - Fix one lockdep error for debugfs entry access
   - Include asm sources for render cache clear batches

  msm:
   - memleak fix
   - display block fix
   - address space fixes

  exynos:
   - error value and reference count fix
   - error print removal

  sun4i:
   - remove HPD polling"

* tag 'drm-fixes-2020-07-03' of git://anongit.freedesktop.org/drm/drm: (22 commits)
  drm/amdgpu: use %u rather than %d for sclk/mclk
  drm/amdgpu/atomfirmware: fix vram_info fetching for renoir
  drm/amd/display: Only revalidate bandwidth on medium and fast updates
  drm: sun4i: hdmi: Remove extra HPD polling
  drm/i915: Include asm sources for {ivb, hsw}_clear_kernel.c
  drm/exynos: fix ref count leak in mic_pre_enable
  drm/exynos: Properly propagate return value in drm_iommu_attach_device()
  drm/exynos: Remove dev_err() on platform_get_irq() failure
  drm/amd/powerplay: Fix NULL dereference in lock_bus() on Vega20 w/o RAS
  dma-buf: Move dma_buf_release() from fops to dentry_ops
  drm/msm: Fix up the rest of the messed up address sizes
  drm/msm: Fix setup of a6xx create_address_space.
  drm/msm: Fix address space size after refactor.
  drm/i915/gvt: Use GFP_ATOMIC instead of GFP_KERNEL in atomic context
  drm/i915/gvt: Fix incorrect check of enabled bits in mask registers
  drm/i915/gvt: Fix two CFL MMIO handling caused by regression.
  drm/i915/gvt: Add one missing MMIO handler for D_SKL_PLUS
  drm/msm: Fix 0xfffflub in "Refactor address space initialization"
  drm/msm/dpu: allow initialization of encoder locks during encoder init
  drm/msm/dpu: fix error return code in dpu_encoder_init
  ...
parents cdd3bb54 1298a549
......@@ -54,37 +54,11 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
dentry->d_name.name, ret > 0 ? name : "");
}
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
};
static struct vfsmount *dma_buf_mnt;
static int dma_buf_fs_init_context(struct fs_context *fc)
{
struct pseudo_fs_context *ctx;
ctx = init_pseudo(fc, DMA_BUF_MAGIC);
if (!ctx)
return -ENOMEM;
ctx->dops = &dma_buf_dentry_ops;
return 0;
}
static struct file_system_type dma_buf_fs_type = {
.name = "dmabuf",
.init_fs_context = dma_buf_fs_init_context,
.kill_sb = kill_anon_super,
};
static int dma_buf_release(struct inode *inode, struct file *file)
static void dma_buf_release(struct dentry *dentry)
{
struct dma_buf *dmabuf;
if (!is_dma_buf_file(file))
return -EINVAL;
dmabuf = file->private_data;
dmabuf = dentry->d_fsdata;
BUG_ON(dmabuf->vmapping_counter);
......@@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file)
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
}
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
};
static struct vfsmount *dma_buf_mnt;
static int dma_buf_fs_init_context(struct fs_context *fc)
{
struct pseudo_fs_context *ctx;
ctx = init_pseudo(fc, DMA_BUF_MAGIC);
if (!ctx)
return -ENOMEM;
ctx->dops = &dma_buf_dentry_ops;
return 0;
}
static struct file_system_type dma_buf_fs_type = {
.name = "dmabuf",
.init_fs_context = dma_buf_fs_init_context,
.kill_sb = kill_anon_super,
};
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
......@@ -412,7 +409,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
......
......@@ -204,6 +204,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
case 12:
mem_channel_number = igp_info->v11.umachannelnumber;
/* channel width is 64 */
if (vram_width)
......
......@@ -2784,7 +2784,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
if (r)
return r;
return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
......@@ -2819,7 +2819,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
if (r)
return r;
return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
......
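
A quick userspace illustration (not part of the patch) of why the two hwmon hunks above switch from %d to %u: the clock reading is unsigned and already scaled, so the extra * 10 * 1000 can exceed INT_MAX, at which point %d renders it as a negative number. The value below is made up for the demo.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sclk = 250000;            /* hypothetical reading, in 10 kHz units */
	uint32_t hz   = sclk * 10 * 1000;  /* 2,500,000,000: larger than INT_MAX */

	printf("%d\n", hz);   /* prints -1794967296, as the old format string would */
	printf("%u\n", hz);   /* prints 2500000000, the intended output */
	return 0;
}
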
......@@ -2538,10 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_release_state(context);
return;
if (update_type > UPDATE_TYPE_FAST) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_release_state(context);
return;
}
}
commit_planes_for_stream(
......
......@@ -522,9 +522,11 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
if (ret)
goto err4;
if (adev->psp.ras.ras) {
ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
if (ret)
goto err4;
}
return 0;
......@@ -560,7 +562,8 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;
smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (adev->psp.ras.ras)
smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
......
......@@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
int ret = 0;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
......@@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
if (ret)
clear_dma_max_seg_size(subdrv_dev);
return 0;
return ret;
}
/*
......
......@@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev)
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
dev_err(dev, "failed to get irq\n");
ret = g2d->irq;
goto err_put_clk;
}
......
......@@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
goto unlock;
ret = pm_runtime_get_sync(mic->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_noidle(mic->dev);
goto unlock;
}
mic_set_path(mic, 1);
......
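
The mic_pre_enable hunk above follows a standard runtime-PM rule: pm_runtime_get_sync() increments the device usage count even when it fails, so the error path must drop that reference or the device can never runtime-suspend again. A minimal kernel-style sketch of the balanced pattern, with example_do_work() and dev as hypothetical placeholders:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_do_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync bumped the usage count even though it failed */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... touch the hardware ... */

	pm_runtime_put(dev);
	return 0;
}
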
ASM sources for auto generated shaders
======================================
The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
pre-compiled batch chunks that will clear any residual render cache during
context switch.
They are generated from their respective platform ASM files present in the
i915/gt/shaders/clear_kernel directory.
The generated .c files should never be modified directly. Instead, any modification
needs to be made to the respective ASM files, and the build instructions below
need to be followed.
Building
========
Environment
-----------
IGT GPU Tools scripts and Mesa's i965 instruction assembler are used for
building.
Please make sure your Mesa tool is compiled with "-Dtools=intel" and
"-Ddri-drivers=i965", and run the script from the IGT source root directory.
The instructions below assume:
* IGT GPU Tools source code is located in your home directory (~) as ~/igt
* Mesa source code is located in your home directory (~) as ~/mesa
  and built under the ~/mesa/build directory
* Linux kernel source code is under your home directory (~) as ~/linux
Instructions
------------
~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
~/igt/lib/i915/shaders/clear_kernel/ivb.asm
~ $ cd ~/igt
igt $ ./scripts/generate_clear_kernel.sh -g ivb \
-m ~/mesa/build/src/intel/tools/i965_asm
~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
~/igt/lib/i915/shaders/clear_kernel/hsw.asm
~ $ cd ~/igt
igt $ ./scripts/generate_clear_kernel.sh -g hsw \
-m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
/*
* Kernel for PAVP buffer clear.
*
* 1. Clear all 64 GRF registers assigned to the kernel with designated value;
* 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
* 512 bytes of Render Cache.
*/
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/**
* Curbe Format
*
* DW 1.0 - Block Offset to write Render Cache
* DW 1.1 [15:0] - Clear Word
* DW 1.2 - Delay iterations
* DW 1.3 - Enable Instrumentation (only for debug)
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
 * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
* BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
* BTI 1: Wait/Instrumentation Buffer
* Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
* Expected to be initialized to 0 by driver/another kernel
* Layout:
* RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
* Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
*/
add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
(+f0.0) jmpi(1) 352D { align1 WE_all 1N };
/**
* State Register has info on where this thread is running
* IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
* HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
*/
mov(8) g3<1>UD 0x00000000UD { align1 1Q };
shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
mov(8) g5<1>UD 0x00000000UD { align1 1Q };
and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
/* Media block read to fetch current value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
/* Media block write for updated value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
/* Delay thread for specified parameter */
add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
(+f0.0) jmpi(1) -32D { align1 WE_all 1N };
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/* Initialize looping parameters */
mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
/* Write 32x16 all "0" block */
mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
mov(16) g3<1>UD 0x00000000UD { align1 1H };
mov(16) g4<1>UD 0x00000000UD { align1 1H };
mov(16) g5<1>UD 0x00000000UD { align1 1H };
mov(16) g6<1>UD 0x00000000UD { align1 1H };
mov(16) g7<1>UD 0x00000000UD { align1 1H };
mov(16) g8<1>UD 0x00000000UD { align1 1H };
mov(16) g9<1>UD 0x00000000UD { align1 1H };
mov(16) g10<1>UD 0x00000000UD { align1 1H };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
/* Now, clear all GRF registers */
add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
(+f0.0) jmpi(1) -64D { align1 WE_all 1N };
/* Terminate the thread */
sendc(8) null<1>UD g127<8,8,1>F 0x82000010
thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
/*
* Kernel for PAVP buffer clear.
*
* 1. Clear all 64 GRF registers assigned to the kernel with designated value;
* 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
* 512 bytes of Render Cache.
*/
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/**
* Curbe Format
*
* DW 1.0 - Block Offset to write Render Cache
* DW 1.1 [15:0] - Clear Word
* DW 1.2 - Delay iterations
* DW 1.3 - Enable Instrumentation (only for debug)
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
 * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
* BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
* BTI 1: Wait/Instrumentation Buffer
* Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
* Expected to be initialized to 0 by driver/another kernel
* Layout :
* RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
* Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
*/
add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
(+f0.0) jmpi(1) 44D { align1 WE_all 1N };
/**
* State Register has info on where this thread is running
* IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
* HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
*/
mov(8) g3<1>UD 0x00000000UD { align1 1Q };
shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
mov(8) g5<1>UD 0x00000000UD { align1 1Q };
and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
/* Media block read to fetch current value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
/* Media block write for updated value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
/* Delay thread for specified parameter */
add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
(+f0.0) jmpi(1) -4D { align1 WE_all 1N };
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/* Initialize looping parameters */
mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
/* Write 32x16 all "0" block */
mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
mov(16) g3<1>UD 0x00000000UD { align1 1H };
mov(16) g4<1>UD 0x00000000UD { align1 1H };
mov(16) g5<1>UD 0x00000000UD { align1 1H };
mov(16) g6<1>UD 0x00000000UD { align1 1H };
mov(16) g7<1>UD 0x00000000UD { align1 1H };
mov(16) g8<1>UD 0x00000000UD { align1 1H };
mov(16) g9<1>UD 0x00000000UD { align1 1H };
mov(16) g10<1>UD 0x00000000UD { align1 1H };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
/* Now, clear all GRF registers */
add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
(+f0.0) jmpi(1) -8D { align1 WE_all 1N };
/* Terminate the thread */
sendc(8) null<1>UD g127<8,8,1>F 0x82000010
thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
......@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
......
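
For context on the GFP_ATOMIC hunk above: GFP_KERNEL allocations may sleep, which is not allowed while holding a spinlock or otherwise running in atomic context, so the non-sleeping GFP_ATOMIC flag has to be used there. A hedged kernel-style sketch of the general rule (example_lock and example_alloc_in_atomic() are made-up names):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void *example_alloc_in_atomic(size_t size)
{
	void *p;

	spin_lock(&example_lock);
	/* GFP_KERNEL could sleep here under the lock; GFP_ATOMIC cannot sleep */
	p = kmalloc(size, GFP_ATOMIC);
	spin_unlock(&example_lock);

	return p;
}
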
......@@ -1726,13 +1726,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
if (data & _MASKED_BIT_ENABLE(1)) {
if (IS_MASKED_BITS_ENABLED(data, 1)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
data & _MASKED_BIT_ENABLE(2)) {
IS_MASKED_BITS_ENABLED(data, 2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
......@@ -1741,14 +1741,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
*/
if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
(data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
&& !vgpu->pv_notified) {
if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
!vgpu->pv_notified) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
gvt_dbg_core("EXECLIST %s on ring %s\n",
......@@ -1809,7 +1809,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
......@@ -1827,7 +1827,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
IS_MASKED_BITS_ENABLED(data, 0x8))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
......@@ -3055,6 +3056,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
......@@ -3131,8 +3133,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
return 0;
}
......
......@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
#define IS_RESTORE_INHIBIT(a) \
(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
#define IS_RESTORE_INHIBIT(a) \
IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
#endif
......@@ -94,6 +94,11 @@
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
#define IS_MASKED_BITS_ENABLED(_val, _b) \
(((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
#define IS_MASKED_BITS_DISABLED(_val, _b) \
((_val) & _MASKED_BIT_DISABLE(_b))
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
......
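
The two helpers above exist because masked registers encode a write-enable mask in the upper 16 bits and the value in the lower 16. The old test, data & _MASKED_BIT_ENABLE(bit), also fires when only the mask half matches, e.g. for a write that disables the bit. A self-contained userspace sketch; the _MASKED_BIT_* bodies are simplified from the i915 definitions, while the IS_MASKED_BITS_* macros are copied from the hunk above:

#include <stdio.h>
#include <stdint.h>

#define _MASKED_BIT_ENABLE(b)   (((uint32_t)(b) << 16) | (uint32_t)(b))
#define _MASKED_BIT_DISABLE(b)  ((uint32_t)(b) << 16)

#define IS_MASKED_BITS_ENABLED(_val, _b) \
	(((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
#define IS_MASKED_BITS_DISABLED(_val, _b) \
	((_val) & _MASKED_BIT_DISABLE(_b))

int main(void)
{
	uint32_t bit = 1 << 1;                   /* an arbitrary mode bit */
	uint32_t wr  = _MASKED_BIT_DISABLE(bit); /* a write that clears the bit */

	/* Old check: any overlap counts, so the disable write looks "enabled". */
	printf("old check: %d\n", !!(wr & _MASKED_BIT_ENABLE(bit)));

	/* New check: both the mask half and the value half must be set. */
	printf("new check: %d\n", !!IS_MASKED_BITS_ENABLED(wr, bit));
	printf("disabled:  %d\n", !!IS_MASKED_BITS_DISABLED(wr, bit));
	return 0;
}
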
......@@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
SZ_16M + 0xfff * SZ_64K);
0xfff * SZ_64K);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
......
......@@ -1121,7 +1121,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return -ENODEV;
mmu = msm_iommu_new(gmu->dev, domain);
gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
if (IS_ERR(gmu->aspace)) {
iommu_domain_free(domain);
return PTR_ERR(gmu->aspace);
......
......@@ -893,8 +893,8 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
#endif
.create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a6xx_get_timestamp,
};
......
......@@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
0xfffffff);
0xffffffff - SZ_16M);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
......
......@@ -521,7 +521,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode)
{
struct msm_display_topology topology;
struct msm_display_topology topology = {0};
int i, intf_count = 0;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
......@@ -537,7 +537,8 @@ static struct msm_display_topology dpu_encoder_get_topology(
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
* Adding color blocks only to primary interface
* Adding color blocks only to primary interface if available in
* sufficient number
*/
if (intf_count == 2)
topology.num_lm = 2;
......@@ -546,8 +547,11 @@ static struct msm_display_topology dpu_encoder_get_topology(
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
topology.num_dspp = topology.num_lm;
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
if (dpu_kms->catalog->dspp &&
(dpu_kms->catalog->dspp_count >= topology.num_lm))
topology.num_dspp = topology.num_lm;
}
topology.num_enc = 0;
topology.num_intf = intf_count;
......@@ -2136,7 +2140,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
dpu_enc = to_dpu_encoder_virt(enc);
mutex_init(&dpu_enc->enc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
......@@ -2151,7 +2154,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
0);
mutex_init(&dpu_enc->rc_lock);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
......@@ -2183,7 +2185,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
if (!dpu_enc)
return ERR_PTR(ENOMEM);
return ERR_PTR(-ENOMEM);
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
drm_enc_mode, NULL);
......@@ -2196,6 +2198,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
mutex_init(&dpu_enc->enc_lock);
mutex_init(&dpu_enc->rc_lock);
return &dpu_enc->base;
}
......
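
Apart from the missing mutex initialisation, the dpu_encoder_init hunk above changes ERR_PTR(ENOMEM) to ERR_PTR(-ENOMEM). The kernel encodes errors as the last 4095 pointer values, so a positive errno is not recognised by IS_ERR() and the caller would dereference a bogus pointer. A userspace sketch with simplified copies of the ERR_PTR()/IS_ERR() semantics (EXAMPLE_ENOMEM is a local stand-in):

#include <stdio.h>

#define MAX_ERRNO	4095
#define EXAMPLE_ENOMEM	12

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *bad  = ERR_PTR(EXAMPLE_ENOMEM);	/* what the old code returned */
	void *good = ERR_PTR(-EXAMPLE_ENOMEM);	/* what the fixed code returns */

	printf("IS_ERR(positive errno) = %d\n", IS_ERR(bad));	/* 0: error is missed */
	printf("IS_ERR(negative errno) = %d\n", IS_ERR(good));	/* 1: error is caught */
	return 0;
}
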
......@@ -780,7 +780,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
aspace = msm_gem_address_space_create(mmu, "dpu1",
0x1000, 0xfffffff);
0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
mmu->funcs->destroy(mmu);
......
......@@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
config->iommu);
aspace = msm_gem_address_space_create(mmu,
"mdp4", 0x1000, 0xffffffff);
"mdp4", 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
......
......@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
aspace = msm_gem_address_space_create(mmu, "mdp5",
0x1000, 0xffffffff);
0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
......
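
The three kms hunks above (dpu, mdp4, mdp5) are the same class of fix: after the address-space refactor, the last argument of msm_gem_address_space_create() is a size rather than an end address, so the old end-style constants no longer meant what they used to and the mapped VA range came out wrong. A tiny userspace sketch of the arithmetic, using the dpu values from the hunk:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start    = 0x1000;
	uint64_t old_arg  = 0xfffffff;                /* old value, now read as a size: ~256 MiB */
	uint64_t new_size = 0x100000000ULL - 0x1000;  /* what the fix passes: 4 GiB minus one page */

	printf("before: VA range [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)(start + old_arg));
	printf("after:  VA range [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)(start + new_size));
	return 0;
}
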
......@@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
if (priv->gpu) {
if (prio >= priv->gpu->nr_rings)
if (prio >= priv->gpu->nr_rings) {
kfree(queue);
return -EINVAL;
}
queue->prio = prio;
}
......
......@@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
unsigned long reg;
if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
reg & SUN4I_HDMI_HPD_HIGH,
0, 500000)) {
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
if (reg & SUN4I_HDMI_HPD_HIGH) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
......