Commit 7136470d authored by Dave Airlie's avatar Dave Airlie

Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
Besides the big item of lifting the "preliminary hw support" tag from the
 Haswell code, just small bits & pieces all over:
 - Leftover Haswell patches and some fixes from Paulo
 - LynxPoint PCH support (for hsw)
 - OOM handling improvements from Chris Wilson (shrinker change sketched below, just before the diff)
 - connector property and send_vblank_event refactorings from Rob Clark
 - random pile of small fixes

 Note that the send_vblank_event refactorings will cause some locking WARNs to
 show up. Imre has fixed that up, but since all the driver changes outside
 of the drm core have been for exynos, those four patches are merged
 through the exynos-next tree.
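
For orientation, both refactorings from Rob Clark are mechanical at the i915
call sites; the trimmed, excerpt-style sketch below uses only names that
appear in the hunks further down (it is not a standalone program):

	/* Connector properties: the drm_connector_property_*() wrappers give
	 * way to the drm_object_*() variants, which take the connector's
	 * embedded drm_mode_object instead of the connector itself. */
	drm_connector_attach_property(connector, prop, 0);                    /* before */
	drm_object_attach_property(&connector->base, prop, 0);                /* after  */

	ret = drm_connector_property_set_value(connector, property, val);     /* before */
	ret = drm_object_property_set_value(&connector->base, property, val); /* after  */

	/* Vblank events: the open-coded completion in
	 * do_intel_finish_page_flip() goes away. */
	/* before */
	e = work->event;
	e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
	e->event.tv_sec = tvbl.tv_sec;
	e->event.tv_usec = tvbl.tv_usec;
	list_add_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);

	/* after: the drm core helper does the same bookkeeping */
	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);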

Meh, I've forgotten to cherry-pick an important fix from Ben for a
regression in the 3.8 gen6+ gtt code. New pull request below. While I'm at
it, the hdmi VIC patch for the drm edid code is still in my queue; I'll
send you that in the next 3.8-fixes pull.
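
The missing fix is presumably the "Fix pte updates in ggtt clear range" commit
at the top of the shortlog: clearing a range of the global GTT used
memset_io(), which only replicates a single byte, so the 32-bit scratch PTE
value was written incorrectly on gen6+. The replacement in the
i915_ggtt_clear_range() hunk below writes every entry explicitly; roughly:

	gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
	int i;

	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);	/* posting read so the PTE writes land */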

* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (33 commits)
  drm/i915: Fix pte updates in ggtt clear range
  drm/i915: promote Haswell to full support
  drm/i915: Report the origin of the LVDS fixed panel mode
  drm/i915: LVDS fallback to fixed-mode if EDID not present
  drm/i915/sdvo: kfree the intel_sdvo_connector, not drm_connector, on destroy
  drm/i915: drm_connector_property -> drm_object_property
  drm/i915: use drm_send_vblank_event() helper
  drm/i915: Use pci_resource functions for BARs.
  drm/i915: Borrow our struct_mutex for the direct reclaim
  drm/i915: Defer assignment of obj->gtt_space until after all possible mallocs
  drm/i915: Apply the IBX transcoder A w/a for HDMI to SDVO as well
  drm/i915: implement WaMbcDriverBootEnable on Haswell
  drm/i915: fix intel_ddi_get_cdclk_freq for ULT machines
  drm/i915: make the panel fitter work on pipes B and C on Haswell
  drm/i915: make the panel fitter work on pipes B and C on IVB
  drm/i915: don't intel_crt_init if DDI A has 4 lanes
  drm/i915: make DP work on LPT-LP machines
  drm/i915: fix false positive "Unclaimed write" messages
  drm/i915: use cpu/pch transcoder on intel_enable_pipe
  drm/i915: don't limit Haswell CRT encoder to pipe A
  ...
parents bd3b49f2 2ff4aeac
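
Before the diff itself, a trimmed sketch of the shrinker change behind the OOM
handling item above: i915_gem_inactive_shrink() can now be entered from direct
reclaim triggered by the driver's own allocations, i.e. with dev->struct_mutex
already held by the current task, so it borrows the lock instead of bailing
out. Everything below is excerpted from the i915_gem.c hunks that follow, not
new API:

	static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
	{
		if (!mutex_is_locked(mutex))
			return false;
	#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
		return mutex->owner == task;
	#else
		/* Since UP may be pre-empted, we cannot assume that we own the lock */
		return false;
	#endif
	}

	...
	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;	/* held by someone else, skip this pass */
		unlock = false;		/* we already own it: borrow it, don't drop it */
	}
	...
	if (unlock)
		mutex_unlock(&dev->struct_mutex);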
......@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
int i915_panel_ignore_lid __read_mostly = 0;
int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect [default], 1=lid open, "
"-1=lid closed)");
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
......@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -416,8 +410,9 @@ void intel_detect_pch(struct drm_device *dev)
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
int id;
unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
......@@ -440,6 +435,11 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
......@@ -884,7 +884,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
if (intel_info->is_haswell || intel_info->is_valleyview)
if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
......@@ -1258,6 +1258,10 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
} \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
......
......@@ -402,7 +402,6 @@ struct i915_suspend_saved_registers {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
......@@ -738,6 +737,7 @@ typedef struct drm_i915_private {
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
unsigned long quirks;
......@@ -1161,6 +1161,8 @@ struct drm_i915_file_private {
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
/*
* The genX designation typically refers to the render engine, so render
......@@ -1206,6 +1208,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
......@@ -1541,7 +1550,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_gtt_fini(struct drm_device *dev);
extern inline void i915_gem_chipset_flush(struct drm_device *dev)
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
......
......@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Now bind it into the GTT if needed */
if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
if (!obj->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret)
goto unlock;
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unlock;
}
if (!obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, obj->cache_level);
goto unpin;
ret = i915_gem_object_get_fence(obj);
if (ret)
goto unlock;
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
goto unpin;
obj->fault_mappable = true;
......@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
i915_gem_object_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
......@@ -2925,10 +2914,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (ret)
return ret;
i915_gem_object_pin_pages(obj);
search_free:
if (map_and_fenceable)
free_space =
drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
......@@ -2939,60 +2929,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
free_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
obj->gtt_space =
free_space =
drm_mm_get_block_generic(free_space,
size, alignment, obj->cache_level,
false);
}
if (obj->gtt_space == NULL) {
if (free_space == NULL) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
if (ret)
if (ret) {
i915_gem_object_unpin_pages(obj);
return ret;
}
goto search_free;
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
free_space,
obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(free_space);
return -EINVAL;
}
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(free_space);
return ret;
}
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->gtt_offset = obj->gtt_space->start;
obj->gtt_space = free_space;
obj->gtt_offset = free_space->start;
fenceable =
obj->gtt_space->size == fence_size &&
(obj->gtt_space->start & (fence_alignment - 1)) == 0;
free_space->size == fence_size &&
(free_space->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
......@@ -3456,11 +3446,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (obj->gtt_space == NULL) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
......@@ -4347,6 +4342,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
return false;
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
return false;
#endif
}
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
......@@ -4357,11 +4365,16 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
int cnt;
if (!mutex_trylock(&dev->struct_mutex))
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return 0;
unlock = false;
}
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
......@@ -4376,6 +4389,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
}
......@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj->cache_level);
}
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
......
......@@ -367,8 +367,9 @@ static void i915_ggtt_clear_range(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
volatile void __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
if (INTEL_INFO(dev)->gen < 6) {
intel_gtt_clear_range(first_entry, num_entries);
......@@ -381,7 +382,8 @@ static void i915_ggtt_clear_range(struct drm_device *dev,
num_entries = max_entries;
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
memset_io(gtt_base, scratch_pte, num_entries * sizeof(scratch_pte));
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
......@@ -609,7 +611,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
u16 snb_gmch_ctl;
u32 tmp;
int ret;
/* On modern platforms we need not worry ourself with the legacy
......@@ -638,12 +639,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);
pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
/* i9xx_setup */
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
......
......@@ -3339,6 +3339,8 @@
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
#define PF_PIPE_SEL_MASK_IVB (3<<29)
#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
......@@ -3851,6 +3853,7 @@
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
......@@ -4514,6 +4517,7 @@
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
......
......@@ -811,10 +811,6 @@ int i915_save_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
/* Hardware status page */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->regfile.saveHWS = I915_READ(HWS_PGA);
i915_save_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
......@@ -865,10 +861,6 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
/* Hardware status page */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
I915_WRITE(HWS_PGA, dev_priv->regfile.saveHWS);
i915_restore_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
......
......@@ -762,7 +762,8 @@ void intel_setup_bios(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Set the Panel Power On/Off timings if uninitialized. */
if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
if (!HAS_PCH_SPLIT(dev) &&
I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
......
......@@ -751,7 +751,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
if (IS_HASWELL(dev) || IS_I830(dev))
if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
......
......@@ -669,6 +669,15 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
break;
}
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
......@@ -1300,6 +1309,8 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
LCPLL_CLK_FREQ_450)
return 450;
else if (IS_ULT(dev_priv->dev))
return 338;
else
return 540;
}
......
......@@ -1149,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
return;
} else {
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_ENABLE);
}
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
......@@ -1189,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
return;
}
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
......@@ -1821,9 +1812,15 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum transcoder pch_transcoder;
int reg;
u32 val;
if (IS_HASWELL(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
......@@ -1834,8 +1831,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pipe);
assert_fdi_tx_pll_enabled(dev_priv, pipe);
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
......@@ -2924,9 +2921,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_EN));
} else if (HAS_PCH_CPT(dev)) {
cpt_phase_pointer_disable(dev, pipe);
}
......@@ -3393,6 +3387,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
......@@ -3469,7 +3467,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* as some pre-programmed values are broken,
* e.g. x201.
*/
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
......@@ -6899,14 +6898,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
mutex_lock(&work->dev->struct_mutex);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
intel_update_fbc(work->dev);
mutex_unlock(&work->dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
kfree(work);
}
......@@ -6917,8 +6921,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
struct timeval tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
......@@ -6934,17 +6936,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
intel_crtc->unpin_work = NULL;
if (work->event) {
e = work->event;
e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
e->event.tv_sec = tvbl.tv_sec;
e->event.tv_usec = tvbl.tv_usec;
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
if (work->event)
drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
drm_vblank_put(dev, intel_crtc->pipe);
......@@ -6954,9 +6947,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
queue_work(dev_priv->wq, &work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
......@@ -7257,7 +7250,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -ENOMEM;
work->event = event;
work->dev = crtc->dev;
work->crtc = crtc;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
......@@ -7282,6 +7275,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
......@@ -7300,6 +7296,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
atomic_inc(&intel_crtc->unpin_work_count);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
......@@ -7314,6 +7311,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
......@@ -7609,7 +7607,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
dev->mode_config.dpms_property;
connector->dpms = DRM_MODE_DPMS_ON;
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
dpms_property,
DRM_MODE_DPMS_ON);
......@@ -8263,6 +8261,8 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
if (!(IS_HASWELL(dev) &&
(I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
intel_crt_init(dev);
if (IS_HASWELL(dev)) {
......
......@@ -2393,7 +2393,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int ret;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
......
......@@ -211,6 +211,8 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
atomic_t unpin_work_count;
/* Display surface base address adjustement for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
......@@ -395,7 +397,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
......
......@@ -874,7 +874,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
......
......@@ -460,13 +460,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode;
/* use cached edid if we have one */
if (lvds_connector->base.edid) {
/* invalid edid */
if (IS_ERR(lvds_connector->base.edid))
return 0;
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
return drm_add_edid_modes(connector, lvds_connector->base.edid);
}
mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
if (mode == NULL)
......@@ -1016,7 +1011,7 @@ bool intel_lvds_init(struct drm_device *dev)
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
drm_connector_attach_property(&intel_connector->base,
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
......@@ -1061,14 +1056,23 @@ bool intel_lvds_init(struct drm_device *dev)
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
DRM_DEBUG_KMS("using preferred mode from EDID: ");
drm_mode_debug_printmodeline(scan);
fixed_mode = drm_mode_duplicate(dev, scan);
intel_find_lvds_downclock(dev, fixed_mode, connector);
if (fixed_mode) {
intel_find_lvds_downclock(dev, fixed_mode,
connector);
goto out;
}
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
DRM_DEBUG_KMS("using mode from VBT: ");
drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
......@@ -1093,6 +1097,8 @@ bool intel_lvds_init(struct drm_device *dev)
if (crtc && (lvds & LVDS_PORT_EN)) {
fixed_mode = intel_crtc_mode_get(dev, crtc);
if (fixed_mode) {
DRM_DEBUG_KMS("using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
......
......@@ -97,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
drm_object_attach_property(&connector->base, prop, 0);
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
......@@ -124,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
drm_object_attach_property(&connector->base, prop, 0);
}
......@@ -374,26 +374,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
#endif
if (i915_panel_ignore_lid)
return i915_panel_ignore_lid > 0 ?
connector_status_connected :
connector_status_disconnected;
/* opregion lid state on HP 2540p is wrong at boot up,
* appears to be either the BIOS or Linux ACPI fault */
#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
if (dev_priv->opregion.lid_state)
if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
#endif
}
switch (i915_panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
return connector_status_disconnected;
default:
return connector_status_unknown;
}
}
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
......
......@@ -2552,7 +2552,8 @@ static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
int gpu_freq, ia_freq, max_ia_freq;
int gpu_freq;
unsigned int ia_freq, max_ia_freq;
int scaling_factor = 180;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
......@@ -3518,6 +3519,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
......@@ -3548,6 +3550,20 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -3590,6 +3606,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
......@@ -3599,6 +3619,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
......@@ -3680,6 +3701,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
......@@ -3745,6 +3767,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
......
......@@ -558,12 +558,9 @@ update_mboxes(struct intel_ring_buffer *ring,
u32 seqno,
u32 mmio_offset)
{
intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_REGISTER |
MI_SEMAPHORE_UPDATE);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
intel_ring_emit(ring, seqno);
}
/**
......
......@@ -1228,6 +1228,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
/* HW workaround for IBX, we need to move the port to
* transcoder A before disabling it. */
if (HAS_PCH_IBX(encoder->base.dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
/* Again we need to write this twice. */
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
/* Transcoder selection bits only update
* effectively on vblank. */
if (crtc)
intel_wait_for_vblank(encoder->base.dev, pipe);
else
msleep(50);
}
}
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
......@@ -1244,8 +1268,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u8 status;
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
if ((temp & SDVO_ENABLE) == 0) {
/* HW workaround for IBX, we need to move the port
* to transcoder A before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
temp |= SDVO_PIPE_B_SELECT;
}
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
}
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
......@@ -1796,7 +1832,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(intel_sdvo_connector);
}
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
......@@ -1828,7 +1864,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
......@@ -1883,7 +1919,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
......@@ -1895,7 +1931,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
......@@ -1907,7 +1943,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
......@@ -1919,7 +1955,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
......@@ -2429,7 +2465,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(&intel_sdvo_connector->base.base,
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
......@@ -2445,7 +2481,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
drm_connector_attach_property(connector, \
drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
......@@ -2482,7 +2518,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->left)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
......@@ -2491,7 +2527,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->right)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
......@@ -2519,7 +2555,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->top)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
......@@ -2529,7 +2565,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->bottom)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
......@@ -2561,7 +2597,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->dot_crawl)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
......
......@@ -1289,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
}
intel_tv->tv_format = tv_mode->name;
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
connector->dev->mode_config.tv_mode_property, i);
}
......@@ -1443,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
int ret = 0;
bool changed = false;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret < 0)
goto out;
......@@ -1655,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
ARRAY_SIZE(tv_modes),
tv_format_names);
drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
initial_mode);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
intel_tv->margin[TV_MARGIN_LEFT]);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
intel_tv->margin[TV_MARGIN_TOP]);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
intel_tv->margin[TV_MARGIN_RIGHT]);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
drm_sysfs_connector_add(connector);
......