Commit 28f03607 authored by Dave Airlie

Merge tag 'topic/drm-misc-2016-01-17' of git://anongit.freedesktop.org/drm-intel into drm-next

Since your main drm-next pull isn't out of the door yet I figured I might
as well flush out drm-misc instead of delaying for 4.6. It's really just
random stuff all over, biggest thing probably connector_mask tracking from
Maarten.

* tag 'topic/drm-misc-2016-01-17' of git://anongit.freedesktop.org/drm-intel: (24 commits)
  drm/fb_cma_helper: Remove implicit call to disable_unused_functions
  drm/sysfs: use kobj_to_dev()
  drm/i915: Init power domains early in driver load
  drm: Do not set connector->encoder in drivers
  apple-gmux: Add initial documentation
  drm: move MODULE_PARM_DESC to other file
  drm/edid: index CEA/HDMI mode tables using the VIC
  drm/atomic: Remove drm_atomic_connectors_for_crtc.
  drm/i915: Update connector_mask during readout, v2.
  drm: Remove opencoded drm_gem_object_release_handle()
  drm: Do not set outparam on error during GEM handle allocation
  drm/docs: more leftovers from the big vtable documentation pile
  drm/atomic-helper: Reject legacy flips on a disabled pipe
  drm/atomic: add connector mask to drm_crtc_state.
  drm/tegra: Use __drm_atomic_helper_reset_connector for subclassing connector state, v2.
  drm/atomic: Add __drm_atomic_helper_connector_reset, v2.
  drm/i915: Set connector_state->connector using the helper.
  drm: Use a normal idr allocation for the obj->name
  drm: Only bump object-reference count when adding first handle
  drm: Balance error path for GEM handle allocation
  ...
parents e9c5e740 4314e19e
@@ -1667,8 +1667,6 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
 			    &dw_hdmi_connector_funcs,
 			    DRM_MODE_CONNECTOR_HDMIA);
 
-	hdmi->connector.encoder = encoder;
-
 	drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
 
 	return 0;
......
@@ -508,6 +508,22 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
+	/*
+	 * Reject event generation for when a CRTC is off and stays off.
+	 * It wouldn't be hard to implement this, but userspace has a track
+	 * record of happily burning through 100% cpu (or worse, crash) when the
+	 * display pipe is suspended. To avoid all that fun just reject updates
+	 * that ask for events since likely that indicates a bug in the
+	 * compositor's drawing loop. This is consistent with the vblank IOCTL
+	 * and legacy page_flip IOCTL which also reject service on a disabled
+	 * pipe.
+	 */
+	if (state->event && !state->active && !crtc->state->active) {
+		DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
+				 crtc->base.id);
+		return -EINVAL;
+	}
+
 	return 0;
 }
@@ -1063,10 +1079,21 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 {
 	struct drm_crtc_state *crtc_state;
 
+	if (conn_state->crtc && conn_state->crtc != crtc) {
+		crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
+								conn_state->crtc);
+
+		crtc_state->connector_mask &=
+			~(1 << drm_connector_index(conn_state->connector));
+	}
+
 	if (crtc) {
 		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
+
+		crtc_state->connector_mask |=
+			1 << drm_connector_index(conn_state->connector);
 	}
 
 	conn_state->crtc = crtc;
@@ -1171,36 +1198,6 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
 }
 EXPORT_SYMBOL(drm_atomic_add_affected_planes);
 
-/**
- * drm_atomic_connectors_for_crtc - count number of connected outputs
- * @state: atomic state
- * @crtc: DRM crtc
- *
- * This function counts all connectors which will be connected to @crtc
- * according to @state. Useful to recompute the enable state for @crtc.
- */
-int
-drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
-			       struct drm_crtc *crtc)
-{
-	struct drm_connector *connector;
-	struct drm_connector_state *conn_state;
-
-	int i, num_connected_connectors = 0;
-
-	for_each_connector_in_state(state, connector, conn_state, i) {
-		if (conn_state->crtc == crtc)
-			num_connected_connectors++;
-	}
-
-	DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d:%s]\n",
-			 state, num_connected_connectors,
-			 crtc->base.id, crtc->name);
-
-	return num_connected_connectors;
-}
-EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
-
 /**
  * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
  * @state: atomic state
......
@@ -463,7 +463,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 	 * crtc only changed its mode but has the same set of connectors.
 	 */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		int num_connectors;
+		bool has_connectors =
+			!!crtc_state->connector_mask;
 
 		/*
 		 * We must set ->active_changed after walking connectors for
@@ -492,10 +493,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 		if (ret != 0)
 			return ret;
 
-		num_connectors = drm_atomic_connectors_for_crtc(state,
-								crtc);
-
-		if (crtc_state->enable != !!num_connectors) {
+		if (crtc_state->enable != has_connectors) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
 					 crtc->base.id, crtc->name);
@@ -1754,7 +1752,7 @@ static int update_output_state(struct drm_atomic_state *state,
 		if (crtc == set->crtc)
 			continue;
 
-		if (!drm_atomic_connectors_for_crtc(state, crtc)) {
+		if (!crtc_state->connector_mask) {
 			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
 								NULL);
 			if (ret < 0)
@@ -2284,6 +2282,15 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
 		goto fail;
 	drm_atomic_set_fb_for_plane(plane_state, fb);
 
+	/* Make sure we don't accidentally do a full modeset. */
+	state->allow_modeset = false;
+
+	if (!crtc_state->active) {
+		DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
+				 crtc->base.id);
+		ret = -EINVAL;
+		goto fail;
+	}
+
 	ret = drm_atomic_async_commit(state);
 	if (ret != 0)
 		goto fail;
@@ -2605,6 +2612,28 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
 
+/**
+ * __drm_atomic_helper_connector_reset - reset state on connector
+ * @connector: drm connector
+ * @conn_state: connector state to assign
+ *
+ * Initializes the newly allocated @conn_state and assigns it to
+ * #connector ->state, usually required when initializing the drivers
+ * or when called from the ->reset hook.
+ *
+ * This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_reset(struct drm_connector *connector,
+				    struct drm_connector_state *conn_state)
+{
+	if (conn_state)
+		conn_state->connector = connector;
+
+	connector->state = conn_state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
+
 /**
  * drm_atomic_helper_connector_reset - default ->reset hook for connectors
  * @connector: drm connector
@@ -2615,11 +2644,11 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
  */
 void drm_atomic_helper_connector_reset(struct drm_connector *connector)
 {
-	kfree(connector->state);
-	connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
+	struct drm_connector_state *conn_state =
+		kzalloc(sizeof(*conn_state), GFP_KERNEL);
 
-	if (connector->state)
-		connector->state->connector = connector;
+	kfree(connector->state);
+	__drm_atomic_helper_connector_reset(connector, conn_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
......
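As an illustration only (not part of this series): the new __drm_atomic_helper_connector_reset() above targets drivers that embed drm_connector_state in a larger, driver-private state struct; the Tegra DSI hunk further down is the in-tree conversion. A driver's ->reset hook would then look roughly like the sketch below, where the foo_* names and the bpc member are made up for the example.

/* Hypothetical driver-private connector state; the base member comes
 * first so that kfree(connector->state) frees the whole allocation. */
struct foo_connector_state {
	struct drm_connector_state base;
	int bpc;	/* made-up driver-specific field */
};

static void foo_connector_reset(struct drm_connector *connector)
{
	struct foo_connector_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		kfree(connector->state);
		/* The helper assigns state->base.connector and connector->state. */
		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}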
@@ -5054,6 +5054,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
 {
 	int i;
 
+	/*
+	 * In the past, drivers have attempted to model the static association
+	 * of connector to encoder in simple connector/encoder devices using a
+	 * direct assignment of connector->encoder = encoder. This connection
+	 * is a logical one and the responsibility of the core, so drivers are
+	 * expected not to mess with this.
+	 *
+	 * Note that the error return should've been enough here, but a large
+	 * majority of drivers ignores the return value, so add in a big WARN
+	 * to get people's attention.
+	 */
+	if (WARN_ON(connector->encoder))
+		return -EINVAL;
+
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 		if (connector->encoder_ids[i] == 0) {
 			connector->encoder_ids[i] = encoder->base.id;
......
@@ -44,10 +44,6 @@ MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
-MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 module_param_named(debug, drm_debug, int, 0600);
 
 static DEFINE_SPINLOCK(drm_minor_lock);
......
@@ -637,8 +637,12 @@ static const struct minimode extra_modes[] = {
 /*
  * Probably taken from CEA-861 spec.
  * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ *
+ * Index using the VIC.
  */
 static const struct drm_display_mode edid_cea_modes[] = {
+	/* 0 - dummy, VICs start at 1 */
+	{ },
 	/* 1 - 640x480@60Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
@@ -987,9 +991,11 @@ static const struct drm_display_mode edid_cea_modes[] = {
 };
 
 /*
- * HDMI 1.4 4k modes.
+ * HDMI 1.4 4k modes. Index using the VIC.
  */
 static const struct drm_display_mode edid_4k_modes[] = {
+	/* 0 - dummy, VICs start at 1 */
+	{ },
 	/* 1 - 3840x2160@30Hz */
 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
 		   3840, 4016, 4104, 4400, 0,
@@ -2548,13 +2554,13 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
 static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
 					     unsigned int clock_tolerance)
 {
-	u8 mode;
+	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
-	for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
-		const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
+		const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
 		unsigned int clock1, clock2;
 
 		/* Check both 60Hz and 59.94Hz */
@@ -2566,7 +2572,7 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
 			continue;
 
 		if (drm_mode_equal_no_clocks(to_match, cea_mode))
-			return mode + 1;
+			return vic;
 	}
 
 	return 0;
@@ -2581,13 +2587,13 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
  */
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 {
-	u8 mode;
+	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
-	for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
-		const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
+		const struct drm_display_mode *cea_mode = &edid_cea_modes[vic];
 		unsigned int clock1, clock2;
 
 		/* Check both 60Hz and 59.94Hz */
@@ -2597,12 +2603,17 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
 		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
-			return mode + 1;
+			return vic;
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_match_cea_mode);
 
+static bool drm_valid_cea_vic(u8 vic)
+{
+	return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
+}
+
 /**
  * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
  * the input VIC from the CEA mode list
@@ -2612,10 +2623,7 @@ EXPORT_SYMBOL(drm_match_cea_mode);
  */
 enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
 {
-	/* return picture aspect ratio for video_code - 1 to access the
-	 * right array element
-	 */
-	return edid_cea_modes[video_code-1].picture_aspect_ratio;
+	return edid_cea_modes[video_code].picture_aspect_ratio;
 }
 EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
 
@@ -2639,13 +2647,13 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
 static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
 					      unsigned int clock_tolerance)
 {
-	u8 mode;
+	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
-	for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
-		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
+	for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
+		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
 		unsigned int clock1, clock2;
 
 		/* Make sure to also match alternate clocks */
@@ -2657,7 +2665,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
 			continue;
 
 		if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
-			return mode + 1;
+			return vic;
 	}
 
 	return 0;
@@ -2673,13 +2681,13 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
  */
 static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 {
-	u8 mode;
+	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
-	for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
-		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
+	for (vic = 1; vic < ARRAY_SIZE(edid_4k_modes); vic++) {
+		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[vic];
 		unsigned int clock1, clock2;
 
 		/* Make sure to also match alternate clocks */
@@ -2689,11 +2697,16 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
 		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
-			return mode + 1;
+			return vic;
 	}
 
 	return 0;
 }
 
+static bool drm_valid_hdmi_vic(u8 vic)
+{
+	return vic > 0 && vic < ARRAY_SIZE(edid_4k_modes);
+}
+
 static int
 add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
@@ -2713,16 +2726,16 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 	list_for_each_entry(mode, &connector->probed_modes, head) {
 		const struct drm_display_mode *cea_mode = NULL;
 		struct drm_display_mode *newmode;
-		u8 mode_idx = drm_match_cea_mode(mode) - 1;
+		u8 vic = drm_match_cea_mode(mode);
 		unsigned int clock1, clock2;
 
-		if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
-			cea_mode = &edid_cea_modes[mode_idx];
+		if (drm_valid_cea_vic(vic)) {
+			cea_mode = &edid_cea_modes[vic];
 			clock2 = cea_mode_alternate_clock(cea_mode);
 		} else {
-			mode_idx = drm_match_hdmi_mode(mode) - 1;
-			if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
-				cea_mode = &edid_4k_modes[mode_idx];
+			vic = drm_match_hdmi_mode(mode);
+			if (drm_valid_hdmi_vic(vic)) {
+				cea_mode = &edid_4k_modes[vic];
 				clock2 = hdmi_mode_alternate_clock(cea_mode);
 			}
 		}
@@ -2773,17 +2786,17 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *newmode;
-	u8 cea_mode;
+	u8 vic;
 
 	if (video_db == NULL || video_index >= video_len)
 		return NULL;
 
 	/* CEA modes are numbered 1..127 */
-	cea_mode = (video_db[video_index] & 127) - 1;
-	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+	vic = (video_db[video_index] & 127);
+	if (!drm_valid_cea_vic(vic))
 		return NULL;
 
-	newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+	newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
 	if (!newmode)
 		return NULL;
@@ -2878,8 +2891,7 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *newmode;
 
-	vic--; /* VICs start at 1 */
-	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+	if (!drm_valid_hdmi_vic(vic)) {
 		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
 		return 0;
 	}
@@ -3170,24 +3182,24 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
 {
 	const struct drm_display_mode *cea_mode;
 	int clock1, clock2, clock;
-	u8 mode_idx;
+	u8 vic;
 	const char *type;
 
 	/*
 	 * allow 5kHz clock difference either way to account for
 	 * the 10kHz clock resolution limit of detailed timings.
 	 */
-	mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1;
-	if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
+	vic = drm_match_cea_mode_clock_tolerance(mode, 5);
+	if (drm_valid_cea_vic(vic)) {
 		type = "CEA";
-		cea_mode = &edid_cea_modes[mode_idx];
+		cea_mode = &edid_cea_modes[vic];
 		clock1 = cea_mode->clock;
 		clock2 = cea_mode_alternate_clock(cea_mode);
 	} else {
-		mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1;
-		if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
+		vic = drm_match_hdmi_mode_clock_tolerance(mode, 5);
+		if (drm_valid_hdmi_vic(vic)) {
 			type = "HDMI";
-			cea_mode = &edid_4k_modes[mode_idx];
+			cea_mode = &edid_4k_modes[vic];
 			clock1 = cea_mode->clock;
 			clock2 = hdmi_mode_alternate_clock(cea_mode);
 		} else {
@@ -3205,7 +3217,7 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
 		return;
 
 	DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
-		  type, mode_idx + 1, mode->clock, clock);
+		  type, vic, mode->clock, clock);
 	mode->clock = clock;
 }
......
@@ -348,9 +348,6 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	}
 
-	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(dev);
-
 	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
 	if (ret < 0) {
 		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
......
@@ -1251,7 +1251,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 			goto fail;
 
 		plane = mode_set->crtc->primary;
-		plane_mask |= drm_plane_index(plane);
+		plane_mask |= (1 << drm_plane_index(plane));
 		plane->old_fb = plane->fb;
 	}
......
@@ -220,6 +220,9 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 static void
 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	bool final = false;
+
 	if (WARN_ON(obj->handle_count == 0))
 		return;
@@ -229,14 +232,39 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	* checked for a name
 	*/
 
-	mutex_lock(&obj->dev->object_name_lock);
+	mutex_lock(&dev->object_name_lock);
 	if (--obj->handle_count == 0) {
 		drm_gem_object_handle_free(obj);
 		drm_gem_object_exported_dma_buf_free(obj);
+		final = true;
 	}
-	mutex_unlock(&obj->dev->object_name_lock);
+	mutex_unlock(&dev->object_name_lock);
 
-	drm_gem_object_unreference_unlocked(obj);
+	if (final)
+		drm_gem_object_unreference_unlocked(obj);
+}
+
+/*
+ * Called at device or object close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+	struct drm_file *file_priv = data;
+	struct drm_gem_object *obj = ptr;
+	struct drm_device *dev = obj->dev;
+
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_gem_remove_prime_handles(obj, file_priv);
+	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
+
+	drm_gem_object_handle_unreference_unlocked(obj);
+
+	return 0;
 }
 
 /**
@@ -277,14 +305,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	if (drm_core_check_feature(dev, DRIVER_PRIME))
-		drm_gem_remove_prime_handles(obj, filp);
-	drm_vma_node_revoke(&obj->vma_node, filp->filp);
-
-	if (dev->driver->gem_close_object)
-		dev->driver->gem_close_object(obj, filp);
-
-	drm_gem_object_handle_unreference_unlocked(obj);
+	drm_gem_object_release_handle(handle, obj, filp);
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_handle_delete);
@@ -326,9 +347,12 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 	       u32 *handlep)
 {
 	struct drm_device *dev = obj->dev;
+	u32 handle;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+	if (obj->handle_count++ == 0)
+		drm_gem_object_reference(obj);
 
 	/*
 	 * Get the user-visible handle using idr. Preload and perform
@@ -338,32 +362,38 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 	spin_lock(&file_priv->table_lock);
 
 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-	drm_gem_object_reference(obj);
-	obj->handle_count++;
 	spin_unlock(&file_priv->table_lock);
 	idr_preload_end();
 	mutex_unlock(&dev->object_name_lock);
-	if (ret < 0) {
-		drm_gem_object_handle_unreference_unlocked(obj);
-		return ret;
-	}
-	*handlep = ret;
+	if (ret < 0)
+		goto err_unref;
+
+	handle = ret;
 
 	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
-	if (ret) {
-		drm_gem_handle_delete(file_priv, *handlep);
-		return ret;
-	}
+	if (ret)
+		goto err_remove;
 
 	if (dev->driver->gem_open_object) {
 		ret = dev->driver->gem_open_object(obj, file_priv);
-		if (ret) {
-			drm_gem_handle_delete(file_priv, *handlep);
-			return ret;
-		}
+		if (ret)
+			goto err_revoke;
 	}
 
+	*handlep = handle;
 	return 0;
+
+err_revoke:
+	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+err_remove:
+	spin_lock(&file_priv->table_lock);
+	idr_remove(&file_priv->object_idr, handle);
+	spin_unlock(&file_priv->table_lock);
+err_unref:
+	drm_gem_object_handle_unreference_unlocked(obj);
+	return ret;
 }
 
 /**
@@ -630,7 +660,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	mutex_lock(&dev->object_name_lock);
-	idr_preload(GFP_KERNEL);
 	/* prevent races with concurrent gem_close. */
 	if (obj->handle_count == 0) {
 		ret = -ENOENT;
@@ -638,7 +667,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (!obj->name) {
-		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
 		if (ret < 0)
 			goto err;
@@ -649,7 +678,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	ret = 0;
 
 err:
-	idr_preload_end();
 	mutex_unlock(&dev->object_name_lock);
 	drm_gem_object_unreference_unlocked(obj);
 	return ret;
@@ -714,29 +742,6 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 	spin_lock_init(&file_private->table_lock);
 }
 
-/*
- * Called at device close to release the file's
- * handle references on objects.
- */
-static int
-drm_gem_object_release_handle(int id, void *ptr, void *data)
-{
-	struct drm_file *file_priv = data;
-	struct drm_gem_object *obj = ptr;
-	struct drm_device *dev = obj->dev;
-
-	if (drm_core_check_feature(dev, DRIVER_PRIME))
-		drm_gem_remove_prime_handles(obj, file_priv);
-	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
-
-	if (dev->driver->gem_close_object)
-		dev->driver->gem_close_object(obj, file_priv);
-
-	drm_gem_object_handle_unreference_unlocked(obj);
-
-	return 0;
-}
-
 /**
  * drm_gem_release - release file-private GEM resources
  * @dev: drm_device which is being closed by userspace
......
@@ -73,6 +73,9 @@ static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 static void store_vblank(struct drm_device *dev, unsigned int pipe,
 			 u32 vblank_count_inc,
......
@@ -240,7 +240,7 @@ static ssize_t edid_show(struct file *filp, struct kobject *kobj,
 			 struct bin_attribute *attr, char *buf, loff_t off,
 			 size_t count)
 {
-	struct device *connector_dev = container_of(kobj, struct device, kobj);
+	struct device *connector_dev = kobj_to_dev(kobj);
 	struct drm_connector *connector = to_drm_connector(connector_dev);
 	unsigned char *edid;
 	size_t size;
......
@@ -187,7 +187,7 @@ static bool gma_resume_pci(struct pci_dev *pdev)
  */
 int gma_power_suspend(struct device *_dev)
 {
-	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+	struct pci_dev *pdev = to_pci_dev(_dev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_psb_private *dev_priv = dev->dev_private;
@@ -214,7 +214,7 @@ int gma_power_suspend(struct device *_dev)
  */
 int gma_power_resume(struct device *_dev)
 {
-	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+	struct pci_dev *pdev = to_pci_dev(_dev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
 	mutex_lock(&power_mutex);
......
@@ -1446,7 +1446,6 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
 	if (ret)
 		goto err_sysfs;
 
-	priv->connector.encoder = &priv->encoder;
 	drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
 
 	return 0;
......
@@ -406,6 +406,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_gem_stolen;
 
+	intel_setup_gmbus(dev);
+
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
@@ -455,6 +457,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 cleanup_irq:
 	intel_guc_ucode_fini(dev);
 	drm_irq_uninstall(dev);
+	intel_teardown_gmbus(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
 cleanup_vga_switcheroo:
@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
-	intel_setup_gmbus(dev);
 	intel_opregion_setup(dev);
 
 	i915_gem_load(dev);
@@ -1101,7 +1103,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
-	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
@@ -1203,7 +1204,6 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_csr_ucode_fini(dev_priv);
 
-	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
 	destroy_workqueue(dev_priv->hotplug.dp_wq);
......
@@ -6492,13 +6492,11 @@ static void intel_connector_check_state(struct intel_connector *connector)
 int intel_connector_init(struct intel_connector *connector)
 {
-	struct drm_connector_state *connector_state;
+	drm_atomic_helper_connector_reset(&connector->base);
 
-	connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
-	if (!connector_state)
+	if (!connector->base.state)
 		return -ENOMEM;
 
-	connector->base.state = connector_state;
-
 	return 0;
 }
@@ -15446,6 +15444,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
+		crtc->base.state->connector_mask = 0;
 
 		/* Because we only establish the connector -> encoder ->
 		 * crtc links if something is active, this means the
@@ -15648,7 +15647,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	for_each_intel_connector(dev, connector) {
 		if (connector->get_hw_state(connector)) {
 			connector->base.dpms = DRM_MODE_DPMS_ON;
-			connector->base.encoder = &connector->encoder->base;
+
+			encoder = connector->encoder;
+			connector->base.encoder = &encoder->base;
+
+			if (encoder->base.crtc &&
+			    encoder->base.crtc->state->active) {
+				/*
+				 * This has to be done during hardware readout
+				 * because anything calling .crtc_disable may
+				 * rely on the connector_mask being accurate.
+				 */
+				encoder->base.crtc->state->connector_mask |=
+					1 << drm_connector_index(&connector->base);
+			}
 		} else {
 			connector->base.dpms = DRM_MODE_DPMS_OFF;
 			connector->base.encoder = NULL;
@@ -15893,6 +15906,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 	intel_cleanup_gt_powersave(dev);
 	mutex_unlock(&dev->struct_mutex);
+
+	intel_teardown_gmbus(dev);
 }
 
 /*
......
@@ -534,7 +534,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-static struct drm_dp_mst_topology_cbs mst_cbs = {
+static const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = intel_dp_add_mst_connector,
 	.register_connector = intel_dp_register_mst_connector,
 	.destroy_connector = intel_dp_destroy_mst_connector,
......
@@ -305,6 +305,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 		dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
 		legacyfb_depth = 16;
 	}
+	drm_helper_disable_unused_functions(drm);
 	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
 				drm->mode_config.num_crtc, MAX_CRTC);
 	if (IS_ERR(imxdrm->fbhelper)) {
......
@@ -204,8 +204,6 @@ static int imx_pd_register(struct drm_device *drm,
 	drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
 
-	imxpd->connector.encoder = &imxpd->encoder;
-
 	return 0;
 }
......
@@ -329,7 +329,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-struct drm_dp_mst_topology_cbs mst_cbs = {
+const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = radeon_dp_add_mst_connector,
 	.register_connector = radeon_dp_register_mst_connector,
 	.destroy_connector = radeon_dp_destroy_mst_connector,
......
@@ -739,8 +739,6 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
 	if (ret < 0)
 		goto err_backlight;
 
-	connector->encoder = encoder;
-
 	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	drm_object_property_set_value(&connector->base,
 		sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
......
@@ -160,6 +160,7 @@ static int sti_load(struct drm_device *dev, unsigned long flags)
 	drm_mode_config_reset(dev);
 
+	drm_helper_disable_unused_functions(dev);
 	drm_fbdev_cma_init(dev, 32,
 			   dev->mode_config.num_crtc,
 			   dev->mode_config.num_connector);
......
@@ -745,14 +745,13 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
 static void tegra_dsi_connector_reset(struct drm_connector *connector)
 {
-	struct tegra_dsi_state *state;
+	struct tegra_dsi_state *state =
+		kzalloc(sizeof(*state), GFP_KERNEL);
 
-	kfree(connector->state);
-	connector->state = NULL;
-
-	state = kzalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
-		connector->state = &state->base;
+	if (state) {
+		kfree(connector->state);
+		__drm_atomic_helper_connector_reset(connector, &state->base);
+	}
 }
 
 static struct drm_connector_state *
......
@@ -294,6 +294,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
 		break;
 	}
 
+	drm_helper_disable_unused_functions(dev);
 	priv->fbdev = drm_fbdev_cma_init(dev, bpp,
 			dev->mode_config.num_crtc,
 			dev->mode_config.num_connector);
......
@@ -328,7 +328,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 	/* The pixelvalve can only feed one encoder (and encoders are
 	 * 1:1 with connectors.)
 	 */
-	if (drm_atomic_connectors_for_crtc(state->state, crtc) > 1)
+	if (hweight32(state->connector_mask) > 1)
 		return -EINVAL;
 
 	drm_atomic_crtc_state_for_each_plane(plane, state) {
......
@@ -63,7 +63,7 @@
  * for the inactive GPU.) Also, muxes are often used to cut power to the
  * discrete GPU while it is not used.
  *
- * DRM drivers register GPUs with vga_switcheroo, these are heretoforth called
+ * DRM drivers register GPUs with vga_switcheroo, these are henceforth called
  * clients. The mux is called the handler. Muxless machines also register a
 * handler to control the power state of the discrete GPU, its ->switchto
 * callback is a no-op for obvious reasons. The discrete GPU is often equipped
......
@@ -3,6 +3,7 @@
  *
  * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com>
  * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de>
+ * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,24 @@
 #include <acpi/video.h>
 #include <asm/io.h>
 
+/**
+ * DOC: Overview
+ *
+ * :1: http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
+ * :2: http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
+ *
+ * gmux is a microcontroller built into the MacBook Pro to support dual GPUs:
+ * A {1}[Lattice XP2] on pre-retinas, a {2}[Renesas R4F2113] on retinas.
+ *
+ * (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has
+ * dual GPUs but no built-in display.)
+ *
+ * gmux is connected to the LPC bus of the southbridge. Its I/O ports are
+ * accessed differently depending on the microcontroller: Driver functions
+ * to access a pre-retina gmux are infixed `_pio_`, those for a retina gmux
+ * are infixed `_index_`.
+ */
+
 struct apple_gmux_data {
 	unsigned long iostart;
 	unsigned long iolen;
@@ -247,6 +266,20 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
 	return false;
 }
 
+/**
+ * DOC: Backlight control
+ *
+ * :3: http://www.ti.com/lit/ds/symlink/lp8543.pdf
+ * :4: http://www.ti.com/lit/ds/symlink/lp8545.pdf
+ *
+ * On single GPU MacBooks, the PWM signal for the backlight is generated by
+ * the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended
+ * to conserve energy. Hence the PWM signal needs to be generated by a separate
+ * backlight driver which is controlled by gmux. The earliest generation
+ * MBP5 2008/09 uses a {3}[TI LP8543] backlight driver. All newer models
+ * use a {4}[TI LP8545].
+ */
+
 static int gmux_get_brightness(struct backlight_device *bd)
 {
 	struct apple_gmux_data *gmux_data = bl_get_data(bd);
@@ -273,6 +306,68 @@ static const struct backlight_ops gmux_bl_ops = {
 	.update_status = gmux_update_status,
 };
 
+/**
+ * DOC: Graphics mux
+ *
+ * :5: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
+ * :6: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * :7: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
+ * :8: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
+ * :9: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
+ * :10: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
+ * :11: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
+ *
+ * On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes
+ * either of them to the panel. One of the tricks gmux has up its sleeve is
+ * to lengthen the blanking interval of its output during a switch to
+ * synchronize it with the GPU switched to. This allows for a flicker-free
+ * switch that is imperceptible by the user ({5}[US 8,687,007 B2]).
+ *
+ * On retinas, muxing is no longer done by gmux itself, but by a separate
+ * chip which is controlled by gmux. The chip is triple sourced, it is
+ * either an {6}[NXP CBTL06142], {7}[TI HD3SS212] or {8}[Pericom PI3VDP12412].
+ * The panel is driven with eDP instead of LVDS since the pixel clock
+ * required for retina resolution exceeds LVDS' limits.
+ *
+ * Pre-retinas are able to switch the panel's DDC pins separately.
+ * This is handled by a {9}[TI SN74LV4066A] which is controlled by gmux.
+ * The inactive GPU can thus probe the panel's EDID without switching over
+ * the entire panel. Retinas lack this functionality as the chips used for
+ * eDP muxing are incapable of switching the AUX channel separately (see
+ * the linked data sheets, Pericom would be capable but this is unused).
+ * However the retina panel has the NO_AUX_HANDSHAKE_LINK_TRAINING bit set
+ * in its DPCD, allowing the inactive GPU to skip the AUX handshake and
+ * set up the output with link parameters pre-calibrated by the active GPU.
+ *
+ * The external DP port is only fully switchable on the first two unibody
+ * MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an
+ * {6}[NXP CBTL06141] which is controlled by gmux. It's the predecessor of the
+ * eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s.
+ *
+ * The following MacBook Pro generations replaced the external DP port with a
+ * combined DP/Thunderbolt port and lost the ability to switch it between GPUs,
+ * connecting it either to the discrete GPU or the Thunderbolt controller.
+ * Oddly enough, while the full port is no longer switchable, AUX and HPD
+ * are still switchable by way of an {10}[NXP CBTL03062] (on pre-retinas
+ * MBP8 2011 and MBP9 2012) or two {11}[TI TS3DS10224] (on retinas) under the
+ * control of gmux. Since the integrated GPU is missing the main link,
+ * external displays appear to it as phantoms which fail to link-train.
+ *
+ * gmux receives the HPD signal of all display connectors and sends an
+ * interrupt on hotplug. On generations which cannot switch external ports,
+ * the discrete GPU can then be woken to drive the newly connected display.
+ * The ability to switch AUX on these generations could be used to improve
+ * reliability of hotplug detection by having the integrated GPU poll the
+ * ports while the discrete GPU is asleep, but currently we do not make use
+ * of this feature.
+ *
+ * gmux' initial switch state on bootup is user configurable via the EFI
+ * variable `gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9` (5th byte,
+ * 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to
+ * switch the panel and the external DP connector and allocates a framebuffer
+ * for the selected GPU.
+ */
+
 static int gmux_switchto(enum vga_switcheroo_client_id id)
 {
 	if (id == VGA_SWITCHEROO_IGD) {
@@ -288,6 +383,14 @@ static int gmux_switchto(enum vga_switcheroo_client_id id)
 	return 0;
 }
 
+/**
+ * DOC: Power control
+ *
+ * gmux is able to cut power to the discrete GPU. It automatically takes care
+ * of the correct sequence to tear down and bring up the power rails for
+ * core voltage, VRAM and PCIe.
+ */
+
 static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
 				   enum vga_switcheroo_state state)
 {
@@ -352,6 +455,16 @@ static const struct vga_switcheroo_handler gmux_handler = {
 	.get_client_id = gmux_get_client_id,
 };
 
+/**
+ * DOC: Interrupt
+ *
+ * gmux is also connected to a GPIO pin of the southbridge and thereby is able
+ * to trigger an ACPI GPE. On the MBP5 2008/09 it's GPIO pin 22 of the Nvidia
+ * MCP79, on all following generations it's GPIO pin 6 of the Intel PCH.
+ * The GPE merely signals that an interrupt occurred, the actual type of event
+ * is identified by reading a gmux register.
+ */
+
 static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data)
 {
 	gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
......
@@ -130,10 +130,6 @@ int __must_check
 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
 			       struct drm_crtc *crtc);
 
-int
-drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
-			       struct drm_crtc *crtc);
-
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
 void
......
@@ -126,6 +126,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 					   struct drm_plane_state *state);
 
+void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
+					 struct drm_connector_state *conn_state);
 void drm_atomic_helper_connector_reset(struct drm_connector *connector);
 void
 __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
......
@@ -306,6 +306,7 @@ struct drm_plane_helper_funcs;
 * @active_changed: crtc_state->active has been toggled.
 * @connectors_changed: connectors to this crtc have been updated
 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
+ * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
 * @last_vblank_count: for helpers and drivers to capture the vblank of the
 *	update to ensure framebuffer cleanup isn't done too early
 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
@@ -339,6 +340,8 @@ struct drm_crtc_state {
 	 */
 	u32 plane_mask;
 
+	u32 connector_mask;
+
 	/* last_vblank_count: for vblank waits before cleanup */
 	u32 last_vblank_count;
@@ -548,7 +551,8 @@ struct drm_crtc_funcs {
 	 * ->page_flip() operation is already pending the callback should return
 	 * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode
 	 * or just runtime disabled through DPMS respectively the new atomic
-	 * "ACTIVE" state) should result in an -EINVAL error code.
+	 * "ACTIVE" state) should result in an -EINVAL error code. Note that
+	 * drm_atomic_helper_page_flip() checks this already for atomic drivers.
 	 */
 	int (*page_flip)(struct drm_crtc *crtc,
 			 struct drm_framebuffer *fb,
......
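A quick illustration of the new field (not part of the patch; the helper name below is made up): with connector_mask in drm_crtc_state, the connector count for a CRTC reduces to a popcount instead of a walk over all connector states, which is exactly what the drm_atomic_helper.c and vc4 hunks above rely on.

#include <linux/bitops.h>	/* hweight32() */

/* Hypothetical helper: number of connectors feeding this CRTC state. */
static unsigned int example_crtc_connector_count(const struct drm_crtc_state *crtc_state)
{
	return hweight32(crtc_state->connector_mask);
}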
@@ -421,7 +421,7 @@ struct drm_dp_payload {
 struct drm_dp_mst_topology_mgr {
 	struct device *dev;
-	struct drm_dp_mst_topology_cbs *cbs;
+	const struct drm_dp_mst_topology_cbs *cbs;
 	int max_dpcd_transaction_bytes;
 	struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
 	int max_payloads;
......
@@ -131,6 +131,20 @@ struct drm_crtc_helper_funcs {
	 * Atomic drivers which need to inspect and adjust more state should
	 * instead use the @atomic_check callback.
	 *
+	 * Also beware that neither core nor helpers filter modes before
+	 * passing them to the driver: While the list of modes that is
+	 * advertised to userspace is filtered using the connector's
+	 * ->mode_valid() callback, neither the core nor the helpers do any
+	 * filtering on modes passed in from userspace when setting a mode. It
+	 * is therefore possible for userspace to pass in a mode that was
+	 * previously filtered out using ->mode_valid() or add a custom mode
+	 * that wasn't probed from EDID or similar to begin with. Even though
+	 * this is an advanced feature and rarely used nowadays, some users rely
+	 * on being able to specify modes manually so drivers must be prepared
+	 * to deal with it. Specifically this means that all drivers need not
+	 * only validate modes in ->mode_valid() but also in ->mode_fixup() to
+	 * make sure invalid modes passed in from userspace are rejected.
+	 *
	 * RETURNS:
	 *
	 * True if an acceptable configuration is possible, false if the modeset
@@ -188,7 +202,9 @@ struct drm_crtc_helper_funcs {
	 * This callback is used by the legacy CRTC helpers to set a new
	 * framebuffer and scanout position. It is optional and used as an
	 * optimized fast-path instead of a full mode set operation with all the
-	 * resulting flickering. Since it can't update other planes it's
+	 * resulting flickering. If it is not present
+	 * drm_crtc_helper_set_config() will fall back to a full modeset, using
+	 * the ->mode_set() callback. Since it can't update other planes it's
	 * incompatible with atomic modeset support.
	 *
	 * This callback is only used by the CRTC helpers and deprecated.
@@ -439,6 +455,20 @@ struct drm_encoder_helper_funcs {
	 * Atomic drivers which need to inspect and adjust more state should
	 * instead use the @atomic_check callback.
	 *
+	 * Also beware that neither core nor helpers filter modes before
+	 * passing them to the driver: While the list of modes that is
+	 * advertised to userspace is filtered using the connector's
+	 * ->mode_valid() callback, neither the core nor the helpers do any
+	 * filtering on modes passed in from userspace when setting a mode. It
+	 * is therefore possible for userspace to pass in a mode that was
+	 * previously filtered out using ->mode_valid() or add a custom mode
+	 * that wasn't probed from EDID or similar to begin with. Even though
+	 * this is an advanced feature and rarely used nowadays, some users rely
+	 * on being able to specify modes manually so drivers must be prepared
+	 * to deal with it. Specifically this means that all drivers need not
+	 * only validate modes in ->mode_valid() but also in ->mode_fixup() to
+	 * make sure invalid modes passed in from userspace are rejected.
+	 *
	 * RETURNS:
	 *
	 * True if an acceptable configuration is possible, false if the modeset
@@ -640,8 +670,16 @@ struct drm_connector_helper_funcs {
	 * In this function drivers then parse the modes in the EDID and add
	 * them by calling drm_add_edid_modes(). But connectors that driver a
	 * fixed panel can also manually add specific modes using
-	 * drm_mode_probed_add(). Finally drivers that support audio probably
-	 * want to update the ELD data, too, using drm_edid_to_eld().
+	 * drm_mode_probed_add(). Drivers which manually add modes should also
+	 * make sure that the @display_info, @width_mm and @height_mm fields of the
+	 * struct #drm_connector are filled in.
+	 *
+	 * Virtual drivers that just want some standard VESA mode with a given
+	 * resolution can call drm_add_modes_noedid(), and mark the preferred
+	 * one using drm_set_preferred_mode().
+	 *
+	 * Finally drivers that support audio probably want to update the ELD
+	 * data, too, using drm_edid_to_eld().
	 *
	 * This function is only called after the ->detect() hook has indicated
	 * that a sink is connected and when the EDID isn't overridden through
......
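Two small sketches, for illustration only, of what the documentation added above asks of drivers. The foo_* names and the clock limit are invented; only drm_add_modes_noedid() and drm_set_preferred_mode() are existing core functions. First, because userspace may set modes that never went through ->mode_valid(), the same hardware limits have to be enforced again in ->mode_fixup():

#define FOO_MAX_PIXCLK_KHZ	165000	/* hypothetical hardware limit */

static bool foo_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	/* Reject modes the hardware cannot drive, even if they never
	 * appeared in the probed mode list. */
	if (mode->clock > FOO_MAX_PIXCLK_KHZ)
		return false;

	return true;
}

Second, a virtual connector that only wants standard VESA modes could implement ->get_modes() along these lines:

static int foo_virtual_get_modes(struct drm_connector *connector)
{
	int count;

	/* Add the standard modes up to 1920x1200 and prefer 1024x768. */
	count = drm_add_modes_noedid(connector, 1920, 1200);
	drm_set_preferred_mode(connector, 1024, 768);

	return count;
}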