Commit 3ded7acf authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A bunch of fixes:
   - vmware memory corruption
   - ttm spinlock balance
   - cirrus/mgag200 work in the presence of efifb
  and finally Alex and Jerome managed to track down a magic set of bits
  that on certain rv740 and evergreen cards allow the correct use of the
  complete set of render backends, this makes the cards operate
  correctly in a number of scenarios we had issues in before, it also
  manages to boost speed on benchmarks by large amounts on these
  specific gpus."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/edid: Make the header fixup threshold tunable
  drm/radeon: fix regression in UMS CS ioctl
  drm/vmwgfx: Fix nasty write past alloced memory area
  drm/ttm: Fix spinlock imbalance
  drm/radeon: fixup tiling group size and backendmap on r6xx-r9xx (v4)
  drm/radeon: fix HD6790, HD6570 backend programming
  drm/radeon: properly program gart on rv740, juniper, cypress, barts, hemlock
  drm/radeon: fix bank information in tiling config
  drm/mgag200: kick off conflicting framebuffers earlier.
  drm/cirrus: kick out conflicting framebuffers earlier
  cirrus: avoid crash if driver fails to load
parents 37b22400 47819ba2
...@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { ...@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{0,} {0,}
}; };
static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
}
static int __devinit static int __devinit
cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
cirrus_kick_out_firmware_fb(pdev);
return drm_get_pci_dev(pdev, ent, &driver); return drm_get_pci_dev(pdev, ent, &driver);
} }
......
...@@ -145,7 +145,7 @@ struct cirrus_device { ...@@ -145,7 +145,7 @@ struct cirrus_device {
struct ttm_bo_device bdev; struct ttm_bo_device bdev;
atomic_t validate_sequence; atomic_t validate_sequence;
} ttm; } ttm;
bool mm_inited;
}; };
......
...@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus) ...@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
pci_resource_len(dev->pdev, 0), pci_resource_len(dev->pdev, 0),
DRM_MTRR_WC); DRM_MTRR_WC);
cirrus->mm_inited = true;
return 0; return 0;
} }
void cirrus_mm_fini(struct cirrus_device *cirrus) void cirrus_mm_fini(struct cirrus_device *cirrus)
{ {
struct drm_device *dev = cirrus->dev; struct drm_device *dev = cirrus->dev;
if (!cirrus->mm_inited)
return;
ttm_bo_device_release(&cirrus->ttm.bdev); ttm_bo_device_release(&cirrus->ttm.bdev);
cirrus_ttm_global_release(cirrus); cirrus_ttm_global_release(cirrus);
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/export.h> #include <linux/module.h>
#include "drmP.h" #include "drmP.h"
#include "drm_edid.h" #include "drm_edid.h"
#include "drm_edid_modes.h" #include "drm_edid_modes.h"
...@@ -149,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid) ...@@ -149,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
} }
EXPORT_SYMBOL(drm_edid_header_is_valid); EXPORT_SYMBOL(drm_edid_header_is_valid);
static int edid_fixup __read_mostly = 6;
module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
/* /*
* Sanity check the EDID block (base or extension). Return 0 if the block * Sanity check the EDID block (base or extension). Return 0 if the block
...@@ -160,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block) ...@@ -160,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
u8 csum = 0; u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid; struct edid *edid = (struct edid *)raw_edid;
if (edid_fixup > 8 || edid_fixup < 0)
edid_fixup = 6;
if (block == 0) { if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid); int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ; if (score == 8) ;
else if (score >= 6) { else if (score >= edid_fixup) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header)); memcpy(raw_edid, edid_header, sizeof(edid_header));
} else { } else {
......
...@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { ...@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
MODULE_DEVICE_TABLE(pci, pciidlist); MODULE_DEVICE_TABLE(pci, pciidlist);
static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
kfree(ap);
}
static int __devinit static int __devinit
mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
mgag200_kick_out_firmware_fb(pdev);
return drm_get_pci_dev(pdev, ent, &driver); return drm_get_pci_dev(pdev, ent, &driver);
} }
......
This diff is collapsed.
...@@ -37,6 +37,15 @@ ...@@ -37,6 +37,15 @@
#define EVERGREEN_MAX_PIPES_MASK 0xFF #define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF #define EVERGREEN_MAX_LDS_NUM 0xFFFF
#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
/* Registers */ /* Registers */
#define RCU_IND_INDEX 0x100 #define RCU_IND_INDEX 0x100
...@@ -54,6 +63,7 @@ ...@@ -54,6 +63,7 @@
#define BACKEND_DISABLE(x) ((x) << 16) #define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8 #define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0) #define NUM_PIPES(x) ((x) << 0)
#define NUM_PIPES_MASK 0x0000000f
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) #define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) #define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12) #define NUM_SHADER_ENGINES(x) ((x) << 12)
...@@ -452,6 +462,7 @@ ...@@ -452,6 +462,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C #define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
......
This diff is collapsed.
...@@ -41,6 +41,9 @@ ...@@ -41,6 +41,9 @@
#define CAYMAN_MAX_TCC 16 #define CAYMAN_MAX_TCC 16
#define CAYMAN_MAX_TCC_MASK 0xFF #define CAYMAN_MAX_TCC_MASK 0xFF
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define DMIF_ADDR_CONFIG 0xBD4 #define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_GFX_CNTL 0x0E44 #define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0) #define RINGID(x) (((x) & 0x3) << 0)
...@@ -148,6 +151,8 @@ ...@@ -148,6 +151,8 @@
#define CGTS_SYS_TCC_DISABLE 0x3F90 #define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 #define CGTS_USER_SYS_TCC_DISABLE 0x3F94
#define RLC_GFX_INDEX 0x3FC4
#define CONFIG_MEMSIZE 0x5428 #define CONFIG_MEMSIZE 0x5428
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
...@@ -212,6 +217,12 @@ ...@@ -212,6 +217,12 @@
#define SOFT_RESET_VGT (1 << 14) #define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15) #define SOFT_RESET_IA (1 << 15)
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
#define INSTANCE_BROADCAST_WRITES (1 << 30)
#define SE_BROADCAST_WRITES (1 << 31)
#define SCRATCH_REG0 0x8500 #define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504 #define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508 #define SCRATCH_REG2 0x8508
......
...@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev) ...@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
return r600_gpu_soft_reset(rdev); return r600_gpu_soft_reset(rdev);
} }
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 num_backends, u32 tiling_pipe_num,
u32 backend_disable_mask) u32 max_rb_num,
{ u32 total_max_rb_num,
u32 backend_map = 0; u32 disabled_rb_mask)
u32 enabled_backends_mask; {
u32 enabled_backends_count; u32 rendering_pipe_num, rb_num_width, req_rb_num;
u32 cur_pipe; u32 pipe_rb_ratio, pipe_rb_remain;
u32 swizzle_pipe[R6XX_MAX_PIPES]; u32 data = 0, mask = 1 << (max_rb_num - 1);
u32 cur_backend; unsigned i, j;
u32 i;
/* mask out the RBs that don't exist on that asic */
if (num_tile_pipes > R6XX_MAX_PIPES) disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
num_tile_pipes = R6XX_MAX_PIPES;
if (num_tile_pipes < 1) rendering_pipe_num = 1 << tiling_pipe_num;
num_tile_pipes = 1; req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
if (num_backends > R6XX_MAX_BACKENDS) BUG_ON(rendering_pipe_num < req_rb_num);
num_backends = R6XX_MAX_BACKENDS;
if (num_backends < 1) pipe_rb_ratio = rendering_pipe_num / req_rb_num;
num_backends = 1; pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
enabled_backends_mask = 0; if (rdev->family <= CHIP_RV740) {
enabled_backends_count = 0; /* r6xx/r7xx */
for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { rb_num_width = 2;
if (((backend_disable_mask >> i) & 1) == 0) { } else {
enabled_backends_mask |= (1 << i); /* eg+ */
++enabled_backends_count; rb_num_width = 4;
}
if (enabled_backends_count == num_backends)
break;
}
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
break;
case 4:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
break;
case 5:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
break;
case 6:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
break;
case 7:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
break;
case 8:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
break;
} }
cur_backend = 0; for (i = 0; i < max_rb_num; i++) {
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { if (!(mask & disabled_rb_mask)) {
while (((1 << cur_backend) & enabled_backends_mask) == 0) for (j = 0; j < pipe_rb_ratio; j++) {
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; data <<= rb_num_width;
data |= max_rb_num - i - 1;
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); }
if (pipe_rb_remain) {
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; data <<= rb_num_width;
data |= max_rb_num - i - 1;
pipe_rb_remain--;
}
}
mask >>= 1;
} }
return backend_map; return data;
} }
int r600_count_pipe_bits(uint32_t val) int r600_count_pipe_bits(uint32_t val)
...@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev) ...@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
{ {
u32 tiling_config; u32 tiling_config;
u32 ramcfg; u32 ramcfg;
u32 backend_map;
u32 cc_rb_backend_disable; u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config; u32 cc_gc_shader_pipe_config;
u32 tmp; u32 tmp;
...@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev) ...@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt = 0; u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0; u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0; u32 sq_stack_resource_mgmt_2 = 0;
u32 disabled_rb_mask;
/* FIXME: implement */ rdev->config.r600.tiling_group_size = 256;
switch (rdev->family) { switch (rdev->family) {
case CHIP_R600: case CHIP_R600:
rdev->config.r600.max_pipes = 4; rdev->config.r600.max_pipes = 4;
...@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev) ...@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.r600.tiling_group_size = 512;
else
rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) { if (tmp > 3) {
tiling_config |= ROW_TILING(3); tiling_config |= ROW_TILING(3);
...@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev) ...@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= BANK_SWAPS(1); tiling_config |= BANK_SWAPS(1);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |= tmp = R6XX_MAX_BACKENDS -
BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
if (tmp < rdev->config.r600.max_backends) {
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; rdev->config.r600.max_backends = tmp;
cc_gc_shader_pipe_config |= }
INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |= cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); tmp = R6XX_MAX_PIPES -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, if (tmp < rdev->config.r600.max_pipes) {
(R6XX_MAX_BACKENDS - rdev->config.r600.max_pipes = tmp;
r600_count_pipe_bits((cc_rb_backend_disable & }
R6XX_MAX_BACKENDS_MASK) >> 16)), tmp = R6XX_MAX_SIMDS -
(cc_rb_backend_disable >> 16)); r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.r600.max_simds) {
rdev->config.r600.max_simds = tmp;
}
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
R6XX_MAX_BACKENDS, disabled_rb_mask);
tiling_config |= tmp << 16;
rdev->config.r600.backend_map = tmp;
rdev->config.r600.tile_config = tiling_config; rdev->config.r600.tile_config = tiling_config;
rdev->config.r600.backend_map = backend_map;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config); WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
/* Setup pipes */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
......
...@@ -219,6 +219,8 @@ ...@@ -219,6 +219,8 @@
#define BACKEND_MAP(x) ((x) << 16) #define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0 #define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES(x) ((x) << 8)
......
...@@ -1848,6 +1848,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); ...@@ -1848,6 +1848,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
extern void r600_hdmi_enable(struct drm_encoder *encoder); extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder); extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
u32 total_max_rb_num,
u32 enabled_rb_mask);
/* /*
* evergreen functions used by radeon_encoder.c * evergreen functions used by radeon_encoder.c
......
...@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p) ...@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
sync_to_ring, p->ring); sync_to_ring, p->ring);
} }
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{ {
struct drm_radeon_cs *cs = data; struct drm_radeon_cs *cs = data;
...@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) ...@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
} }
} }
if ((p->cs_flags & RADEON_CS_USE_VM) && /* these are KMS only */
!p->rdev->vm_manager.enabled) { if (p->rdev) {
DRM_ERROR("VM not active on asic!\n"); if ((p->cs_flags & RADEON_CS_USE_VM) &&
return -EINVAL; !p->rdev->vm_manager.enabled) {
} DRM_ERROR("VM not active on asic!\n");
return -EINVAL;
/* we only support VM on SI+ */ }
if ((p->rdev->family >= CHIP_TAHITI) &&
((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
DRM_ERROR("VM required on SI+!\n");
return -EINVAL;
}
if (radeon_cs_get_ring(p, ring, priority)) /* we only support VM on SI+ */
return -EINVAL; if ((p->rdev->family >= CHIP_TAHITI) &&
((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
DRM_ERROR("VM required on SI+!\n");
return -EINVAL;
}
if (radeon_cs_get_ring(p, ring, priority))
return -EINVAL;
}
/* deal with non-vm */ /* deal with non-vm */
if ((p->chunk_ib_idx != -1) && if ((p->chunk_ib_idx != -1) &&
......
This diff is collapsed.
...@@ -106,10 +106,13 @@ ...@@ -106,10 +106,13 @@
#define BACKEND_MAP(x) ((x) << 16) #define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0 #define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00 #define INACTIVE_QD_PIPES_MASK 0x0000FF00
#define INACTIVE_QD_PIPES_SHIFT 8
#define INACTIVE_SIMDS(x) ((x) << 16) #define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000 #define INACTIVE_SIMDS_MASK 0x00FF0000
...@@ -174,6 +177,7 @@ ...@@ -174,6 +177,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C #define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
......
...@@ -1834,6 +1834,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) ...@@ -1834,6 +1834,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
(void) ttm_bo_cleanup_refs(bo, false, false, false); (void) ttm_bo_cleanup_refs(bo, false, false, false);
kref_put(&bo->list_kref, ttm_bo_release_list); kref_put(&bo->list_kref, ttm_bo_release_list);
spin_lock(&glob->lru_lock);
continue; continue;
} }
......
...@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, ...@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
cmd += sizeof(remap_cmd) / sizeof(uint32); cmd += sizeof(remap_cmd) / sizeof(uint32);
for (i = 0; i < num_pages; ++i) { for (i = 0; i < num_pages; ++i) {
if (VMW_PPN_SIZE > 4) if (VMW_PPN_SIZE <= 4)
*cmd = page_to_pfn(*pages++); *cmd = page_to_pfn(*pages++);
else else
*((uint64_t *)cmd) = page_to_pfn(*pages++); *((uint64_t *)cmd) = page_to_pfn(*pages++);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment