Commit 1929041b authored by Linus Torvalds

Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates part 2 from Dave Airlie:
 "This is the follow-up pull, 3 pieces

  a) exynos next stuff, was delayed but looks okay to me, one patch in
     v4l bits but it was acked by v4l person.
  b) UAPI disintegration bits
  c) intel fixes - DP fixes, hang fixes, other misc fixes."

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (52 commits)
  drm: exynos: hdmi: remove drm common hdmi platform data struct
  drm: exynos: hdmi: add support for exynos5 hdmi
  drm: exynos: hdmi: replace is_v13 with version check in hdmi
  drm: exynos: hdmi: add support for exynos5 mixer
  drm: exynos: hdmi: add support to disable video processor in mixer
  drm: exynos: hdmi: add support for platform variants for mixer
  drm: exynos: hdmi: add support for exynos5 hdmiphy
  drm: exynos: hdmi: add support for exynos5 ddc
  drm: exynos: remove drm hdmi platform data struct
  drm: exynos: hdmi: turn off HPD interrupt in HDMI chip
  drm: exynos: hdmi: use s5p-hdmi platform data
  drm: exynos: hdmi: fix interrupt handling
  drm: exynos: hdmi: support for platform variants
  media: s5p-hdmi: add HPD GPIO to platform data
  UAPI: (Scripted) Disintegrate include/drm
  drm/i915: Fix GT_MODE default value
  drm/i915: don't frob the vblank ts in finish_page_flip
  drm/i915: call drm_handle_vblank before finish_page_flip
  drm/i915: print warning if vmi915_gem_fault error is not handled
  drm/i915: EBUSY status handling added to i915_gem_fault().
  ...
parents d43b7167 1f31c69d
......@@ -395,13 +395,14 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
* \param adapter : i2c device adaptor
* \return 1 on success
*/
static bool
bool
drm_probe_ddc(struct i2c_adapter *adapter)
{
unsigned char out;
return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
}
EXPORT_SYMBOL(drm_probe_ddc);
/**
* drm_get_edid - get EDID data, if available
......
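With drm_probe_ddc() made non-static and exported above, other drm code can probe for a sink cheaply before doing a full EDID fetch. A minimal sketch of such a caller (hypothetical driver code, not part of this commit; ddc_adapter_for() is an assumed helper):

static enum drm_connector_status
my_connector_detect(struct drm_connector *connector, bool force)
{
	/* ddc_adapter_for() is a hypothetical helper returning the i2c
	 * adapter wired to this connector's DDC lines. */
	struct i2c_adapter *adapter = ddc_adapter_for(connector);

	/* drm_probe_ddc() issues a one-byte DDC read; success means a
	 * sink is responding on the bus. */
	return drm_probe_ddc(adapter) ? connector_status_connected :
					connector_status_disconnected;
}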
......@@ -26,29 +26,41 @@ static int s5p_ddc_probe(struct i2c_client *client,
{
hdmi_attach_ddc_client(client);
dev_info(&client->adapter->dev, "attached s5p_ddc "
"into i2c adapter successfully\n");
dev_info(&client->adapter->dev,
"attached %s into i2c adapter successfully\n",
client->name);
return 0;
}
static int s5p_ddc_remove(struct i2c_client *client)
{
dev_info(&client->adapter->dev, "detached s5p_ddc "
"from i2c adapter successfully\n");
dev_info(&client->adapter->dev,
"detached %s from i2c adapter successfully\n",
client->name);
return 0;
}
static struct i2c_device_id ddc_idtable[] = {
{"s5p_ddc", 0},
{"exynos5-hdmiddc", 0},
{ },
};
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
}, {
/* end node */
}
};
struct i2c_driver ddc_driver = {
.driver = {
.name = "s5p_ddc",
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
.of_match_table = hdmiddc_match_types,
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
......
......@@ -40,6 +40,7 @@ struct exynos_drm_connector {
struct drm_connector drm_connector;
uint32_t encoder_id;
struct exynos_drm_manager *manager;
uint32_t dpms;
};
/* convert exynos_video_timings to drm_display_mode */
......@@ -149,8 +150,12 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
count = drm_add_edid_modes(connector, edid);
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct exynos_drm_panel_info *panel;
struct drm_display_mode *mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_ERROR("failed to create a new display mode.\n");
return 0;
}
if (display_ops->get_panel)
panel = display_ops->get_panel(manager->dev);
......@@ -194,8 +199,7 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
return ret;
}
static struct drm_encoder *exynos_drm_best_encoder(
struct drm_connector *connector)
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct exynos_drm_connector *exynos_connector =
......@@ -224,6 +228,43 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
.best_encoder = exynos_drm_best_encoder,
};
void exynos_drm_display_power(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = exynos_drm_best_encoder(connector);
struct exynos_drm_connector *exynos_connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_display_ops *display_ops = manager->display_ops;
exynos_connector = to_exynos_connector(connector);
if (exynos_connector->dpms == mode) {
DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
return;
}
if (display_ops && display_ops->power_on)
display_ops->power_on(manager->dev, mode);
exynos_connector->dpms = mode;
}
static void exynos_drm_connector_dpms(struct drm_connector *connector,
int mode)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
/*
* in case that drm_crtc_helper_set_mode() is called,
* encoder/crtc->funcs->dpms() will simply return
* because they were already DRM_MODE_DPMS_ON, so only
* exynos_drm_display_power() will be called.
*/
drm_helper_connector_dpms(connector, mode);
exynos_drm_display_power(connector, mode);
}
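The dpms value cached in the connector above makes the power callback idempotent: drm_helper_connector_dpms() and the explicit exynos_drm_display_power() call can otherwise trigger the same transition twice. Distilled, the guard is just this (hypothetical names; a sketch of the pattern, not the driver's exact code):

struct my_connector {
	int dpms;	/* last mode actually programmed */
};

static void my_display_power(struct my_connector *c, int mode)
{
	if (c->dpms == mode)	/* hardware is already in that state */
		return;
	my_hw_power(c, mode == DRM_MODE_DPMS_ON);	/* assumed hw hook */
	c->dpms = mode;		/* remember it for the next call */
}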
static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
unsigned int max_width, unsigned int max_height)
{
......@@ -283,7 +324,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
}
static struct drm_connector_funcs exynos_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.dpms = exynos_drm_connector_dpms,
.fill_modes = exynos_drm_connector_fill_modes,
.detect = exynos_drm_connector_detect,
.destroy = exynos_drm_connector_destroy,
......@@ -332,6 +373,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
exynos_connector->encoder_id = encoder->base.id;
exynos_connector->manager = manager;
exynos_connector->dpms = DRM_MODE_DPMS_OFF;
connector->encoder = encoder;
err = drm_mode_connector_attach_encoder(connector, encoder);
......
......@@ -31,4 +31,8 @@
struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
struct drm_encoder *encoder);
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector);
void exynos_drm_display_power(struct drm_connector *connector, int mode);
#endif
......@@ -34,32 +34,14 @@
static LIST_HEAD(exynos_drm_subdrv_list);
static int exynos_drm_subdrv_probe(struct drm_device *dev,
static int exynos_drm_create_enc_conn(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
DRM_DEBUG_DRIVER("%s\n", __FILE__);
if (subdrv->probe) {
int ret;
/*
* this probe callback is called by the sub driver once all of its
* resources, such as clocks, irqs and register maps, have been set
* up, or by the load() of the exynos drm driver.
*
* P.S. note that this driver is considered for modularization.
*/
ret = subdrv->probe(dev, subdrv->dev);
if (ret)
return ret;
}
if (!subdrv->manager)
return 0;
DRM_DEBUG_DRIVER("%s\n", __FILE__);
subdrv->manager->dev = subdrv->dev;
......@@ -78,24 +60,22 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev,
connector = exynos_drm_connector_create(dev, encoder);
if (!connector) {
DRM_ERROR("failed to create connector\n");
encoder->funcs->destroy(encoder);
return -EFAULT;
ret = -EFAULT;
goto err_destroy_encoder;
}
subdrv->encoder = encoder;
subdrv->connector = connector;
return 0;
err_destroy_encoder:
encoder->funcs->destroy(encoder);
return ret;
}
static void exynos_drm_subdrv_remove(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
static void exynos_drm_destroy_enc_conn(struct exynos_drm_subdrv *subdrv)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
if (subdrv->remove)
subdrv->remove(dev);
if (subdrv->encoder) {
struct drm_encoder *encoder = subdrv->encoder;
encoder->funcs->destroy(encoder);
......@@ -109,9 +89,43 @@ static void exynos_drm_subdrv_remove(struct drm_device *dev,
}
}
static int exynos_drm_subdrv_probe(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
{
if (subdrv->probe) {
int ret;
subdrv->drm_dev = dev;
/*
* this probe callback is called by the sub driver once all of its
* resources, such as clocks, irqs and register maps, have been set
* up, or by the load() of the exynos drm driver.
*
* P.S. note that this driver is considered for modularization.
*/
ret = subdrv->probe(dev, subdrv->dev);
if (ret)
return ret;
}
return 0;
}
static void exynos_drm_subdrv_remove(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
if (subdrv->remove)
subdrv->remove(dev, subdrv->dev);
}
int exynos_drm_device_register(struct drm_device *dev)
{
struct exynos_drm_subdrv *subdrv, *n;
unsigned int fine_cnt = 0;
int err;
DRM_DEBUG_DRIVER("%s\n", __FILE__);
......@@ -120,14 +134,36 @@ int exynos_drm_device_register(struct drm_device *dev)
return -EINVAL;
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
subdrv->drm_dev = dev;
err = exynos_drm_subdrv_probe(dev, subdrv);
if (err) {
DRM_DEBUG("exynos drm subdrv probe failed.\n");
list_del(&subdrv->list);
continue;
}
/*
* if manager is null then it means that this sub driver
* doesn't need encoder and connector.
*/
if (!subdrv->manager) {
fine_cnt++;
continue;
}
err = exynos_drm_create_enc_conn(dev, subdrv);
if (err) {
DRM_DEBUG("failed to create encoder and connector.\n");
exynos_drm_subdrv_remove(dev, subdrv);
list_del(&subdrv->list);
continue;
}
fine_cnt++;
}
if (!fine_cnt)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_register);
......@@ -143,8 +179,10 @@ int exynos_drm_device_unregister(struct drm_device *dev)
return -EINVAL;
}
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
exynos_drm_subdrv_remove(dev, subdrv);
exynos_drm_destroy_enc_conn(subdrv);
}
return 0;
}
......
......@@ -66,7 +66,6 @@ struct exynos_drm_crtc {
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
......@@ -76,12 +75,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
return;
}
mutex_lock(&dev->struct_mutex);
exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
exynos_crtc->dpms = mode;
mutex_unlock(&dev->struct_mutex);
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
......@@ -97,6 +92,7 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
exynos_plane_commit(exynos_crtc->plane);
exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
}
......@@ -126,8 +122,6 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
/*
* copy the mode data adjusted by mode_fixup() into crtc->mode
* so that hardware can be set to the proper mode.
......@@ -161,6 +155,12 @@ static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
DRM_DEBUG_KMS("%s\n", __FILE__);
/* when framebuffer changing is requested, crtc's dpms should be on */
if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
DRM_ERROR("failed framebuffer changing request.\n");
return -EPERM;
}
crtc_w = crtc->fb->width - x;
crtc_h = crtc->fb->height - y;
......@@ -213,6 +213,12 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
DRM_DEBUG_KMS("%s\n", __FILE__);
/* when the page flip is requested, crtc's dpms should be on */
if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
DRM_ERROR("failed page flip request.\n");
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (event) {
......
......@@ -36,6 +36,20 @@
#define MAX_FB_BUFFER 4
#define DEFAULT_ZPOS -1
#define _wait_for(COND, MS) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
} \
ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS)
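The macro busy-polls COND until it becomes true or MS milliseconds pass, evaluating to 0 on success and -ETIMEDOUT on timeout; note that it spins rather than sleeps, so it is meant for short waits. A usage sketch (MY_STATUS and MY_READY are assumed register names, not defined in this patch):

int ret;

/* poll a hypothetical status register for up to 50 ms */
ret = wait_for(__raw_readl(regs + MY_STATUS) & MY_READY, 50);
if (ret < 0)
	DRM_DEBUG_KMS("device did not signal ready within 50 ms\n");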
struct drm_device;
struct exynos_drm_overlay;
struct drm_connector;
......@@ -60,6 +74,8 @@ enum exynos_drm_output_type {
* @commit: apply hardware specific overlay data to registers.
* @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
* @wait_for_vblank: wait for vblank interrupt to make sure that
* hardware overlay is disabled.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
......@@ -67,6 +83,7 @@ struct exynos_drm_overlay_ops {
void (*commit)(struct device *subdrv_dev, int zpos);
void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
......@@ -265,7 +282,7 @@ struct exynos_drm_subdrv {
struct exynos_drm_manager *manager;
int (*probe)(struct drm_device *drm_dev, struct device *dev);
void (*remove)(struct drm_device *dev);
void (*remove)(struct drm_device *drm_dev, struct device *dev);
int (*open)(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file);
void (*close)(struct drm_device *drm_dev, struct device *dev,
......
......@@ -31,6 +31,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_connector.h"
#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
drm_encoder)
......@@ -44,26 +45,23 @@
* @dpms: store the encoder dpms value.
*/
struct exynos_drm_encoder {
struct drm_crtc *old_crtc;
struct drm_encoder drm_encoder;
struct exynos_drm_manager *manager;
int dpms;
};
static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
struct exynos_drm_display_ops *display_ops =
manager->display_ops;
if (exynos_drm_best_encoder(connector) == encoder) {
DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
connector->base.id, mode);
if (display_ops && display_ops->power_on)
display_ops->power_on(manager->dev, mode);
exynos_drm_display_power(connector, mode);
}
}
}
......@@ -88,13 +86,13 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_ON:
if (manager_ops && manager_ops->apply)
manager_ops->apply(manager->dev);
exynos_drm_display_power(encoder, mode);
exynos_drm_connector_power(encoder, mode);
exynos_encoder->dpms = mode;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
exynos_drm_display_power(encoder, mode);
exynos_drm_connector_power(encoder, mode);
exynos_encoder->dpms = mode;
break;
default:
......@@ -127,24 +125,74 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
static void disable_plane_to_crtc(struct drm_device *dev,
struct drm_crtc *old_crtc,
struct drm_crtc *new_crtc)
{
struct drm_plane *plane;
/*
* if old_crtc isn't the same as encoder->crtc, the user changed the
* crtc id to another one, so the plane on old_crtc should be
* disabled and plane->crtc should be set to new_crtc
* (encoder->crtc)
*/
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
if (plane->crtc == old_crtc) {
/*
* do not change below call order.
*
* plane->funcs->disable_plane call checks
* if encoder->crtc is same as plane->crtc and if same
* then overlay_ops->disable callback will be called
* to disable the current hw overlay, so plane->crtc should
* have new_crtc because new_crtc was set to
* encoder->crtc in advance.
*/
plane->crtc = new_crtc;
plane->funcs->disable_plane(plane);
}
}
}
static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
struct exynos_drm_manager *manager;
struct exynos_drm_manager_ops *manager_ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder)
if (connector->encoder == encoder) {
struct exynos_drm_encoder *exynos_encoder;
exynos_encoder = to_exynos_encoder(encoder);
if (exynos_encoder->old_crtc != encoder->crtc &&
exynos_encoder->old_crtc) {
/*
* disable a plane to old crtc and change
* crtc of the plane to new one.
*/
disable_plane_to_crtc(dev,
exynos_encoder->old_crtc,
encoder->crtc);
}
manager = exynos_drm_get_manager(encoder);
manager_ops = manager->ops;
if (manager_ops && manager_ops->mode_set)
manager_ops->mode_set(manager->dev,
adjusted_mode);
exynos_encoder->old_crtc = encoder->crtc;
}
}
}
......@@ -166,12 +214,27 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
manager_ops->commit(manager->dev);
}
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
struct drm_device *dev = encoder->dev;
exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
/* all planes connected to this encoder should be also disabled. */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
if (plane->crtc == encoder->crtc)
plane->funcs->disable_plane(plane);
}
}
static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
.dpms = exynos_drm_encoder_dpms,
.mode_fixup = exynos_drm_encoder_mode_fixup,
.mode_set = exynos_drm_encoder_mode_set,
.prepare = exynos_drm_encoder_prepare,
.commit = exynos_drm_encoder_commit,
.disable = exynos_drm_encoder_disable,
};
static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
......@@ -337,6 +400,19 @@ void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
if (manager_ops && manager_ops->dpms)
manager_ops->dpms(manager->dev, mode);
/*
* set the current mode to the new one so that data aren't written
* to the registers twice by drm_helper_connector_dpms.
*
* in case that drm_crtc_helper_set_mode() is called,
* overlay_ops->commit() and manager_ops->commit() callbacks
* can be called two times, first at drm_crtc_helper_set_mode()
* and second at drm_helper_connector_dpms().
* so with this setting, when drm_helper_connector_dpms() is called
* encoder->funcs->dpms() will be ignored.
*/
exynos_encoder->dpms = mode;
/*
* if this condition is ok then it means that the crtc is already
* detached from encoder and last function for detaching is properly
......@@ -422,4 +498,14 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
/*
* wait for the vblank interrupt
* - this makes sure the hardware overlay is really disabled, so that
*   no DMA access hits memory after the gem buffer is released,
*   because the overlay-disable setting only takes effect at vsync.
*/
if (overlay_ops->wait_for_vblank)
overlay_ops->wait_for_vblank(manager->dev);
}
......@@ -41,10 +41,12 @@
* exynos specific framebuffer structure.
*
* @fb: drm framebuffer object.
* @buf_cnt: number of buffers attached to this framebuffer.
* @exynos_gem_obj: array of exynos specific gem objects, one per buffer.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
unsigned int buf_cnt;
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
......@@ -101,6 +103,25 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.dirty = exynos_drm_fb_dirty,
};
void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
unsigned int cnt)
{
struct exynos_drm_fb *exynos_fb;
exynos_fb = to_exynos_fb(fb);
exynos_fb->buf_cnt = cnt;
}
unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb;
exynos_fb = to_exynos_fb(fb);
return exynos_fb->buf_cnt;
}
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
......@@ -127,6 +148,43 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
return &exynos_fb->fb;
}
static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
{
unsigned int cnt = 0;
if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
return drm_format_num_planes(mode_cmd->pixel_format);
while (cnt != MAX_FB_BUFFER) {
if (!mode_cmd->handles[cnt])
break;
cnt++;
}
/*
* check if NV12 or NV12M.
*
* NV12
* handles[0] = base1, offsets[0] = 0
* handles[1] = base1, offsets[1] = Y_size
*
* NV12M
* handles[0] = base1, offsets[0] = 0
* handles[1] = base2, offsets[1] = 0
*/
if (cnt == 2) {
/*
* in case of NV12 format, offsets[1] is not 0 and
* handles[0] is same as handles[1].
*/
if (mode_cmd->offsets[1] &&
mode_cmd->handles[0] == mode_cmd->handles[1])
cnt = 1;
}
return cnt;
}
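To make the NV12/NV12M distinction concrete, here is a sketch of the two ADDFB2 requests the function tells apart (hypothetical gem handle numbers; y_size stands for the size of the luma plane):

/* NV12: both planes in one gem buffer, chroma at offset y_size
 * (assumed luma plane size). handles[0] == handles[1] and
 * offsets[1] != 0, so cnt is forced to 1. */
struct drm_mode_fb_cmd2 nv12 = {
	.pixel_format = DRM_FORMAT_NV12,
	.handles = { 7, 7 },
	.offsets = { 0, y_size },
};

/* NV12M: one gem buffer per plane, both at offset 0, so the
 * handle count of 2 is kept. */
struct drm_mode_fb_cmd2 nv12m = {
	.pixel_format = DRM_FORMAT_NV12,
	.handles = { 7, 8 },
	.offsets = { 0, 0 },
};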
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
......@@ -134,7 +192,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
int nr;
int i;
DRM_DEBUG_KMS("%s\n", __FILE__);
......@@ -152,9 +209,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
exynos_fb = to_exynos_fb(fb);
nr = exynos_drm_format_num_buffers(fb->pixel_format);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < nr; i++) {
for (i = 1; i < exynos_fb->buf_cnt; i++) {
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
......
......@@ -28,19 +28,6 @@
#ifndef _EXYNOS_DRM_FB_H_
#define _EXYNOS_DRM_FB_H
static inline int exynos_drm_format_num_buffers(uint32_t format)
{
switch (format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV12MT:
return 2;
case DRM_FORMAT_YUV420:
return 3;
default:
return 1;
}
}
struct drm_framebuffer *
exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
......@@ -52,4 +39,11 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
void exynos_drm_mode_config_init(struct drm_device *dev);
/* set a buffer count to drm framebuffer. */
void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
unsigned int cnt);
/* get a buffer count to drm framebuffer. */
unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
#endif
......@@ -79,6 +79,9 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
/* the framebuffer's buffer count is always 1 at boot time. */
exynos_drm_fb_set_buf_cnt(fb, 1);
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitches[0];
......
......@@ -57,6 +57,18 @@
#define get_fimd_context(dev) platform_get_drvdata(to_platform_device(dev))
struct fimd_driver_data {
unsigned int timing_base;
};
struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
};
struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
};
struct fimd_win_data {
unsigned int offset_x;
unsigned int offset_y;
......@@ -91,6 +103,13 @@ struct fimd_context {
struct exynos_drm_panel_info *panel;
};
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
return (struct fimd_driver_data *)
platform_get_device_id(pdev)->driver_data;
}
static bool fimd_display_is_connected(struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
......@@ -194,32 +213,35 @@ static void fimd_commit(struct device *dev)
struct fimd_context *ctx = get_fimd_context(dev);
struct exynos_drm_panel_info *panel = ctx->panel;
struct fb_videomode *timing = &panel->timing;
struct fimd_driver_data *driver_data;
struct platform_device *pdev = to_platform_device(dev);
u32 val;
driver_data = drm_fimd_get_driver_data(pdev);
if (ctx->suspended)
return;
DRM_DEBUG_KMS("%s\n", __FILE__);
/* setup polarity values from machine code. */
writel(ctx->vidcon1, ctx->regs + VIDCON1);
writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
/* setup vertical timing values. */
val = VIDTCON0_VBPD(timing->upper_margin - 1) |
VIDTCON0_VFPD(timing->lower_margin - 1) |
VIDTCON0_VSPW(timing->vsync_len - 1);
writel(val, ctx->regs + VIDTCON0);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
/* setup horizontal timing values. */
val = VIDTCON1_HBPD(timing->left_margin - 1) |
VIDTCON1_HFPD(timing->right_margin - 1) |
VIDTCON1_HSPW(timing->hsync_len - 1);
writel(val, ctx->regs + VIDTCON1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(timing->yres - 1) |
VIDTCON2_HOZVAL(timing->xres - 1);
writel(val, ctx->regs + VIDTCON2);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
val = ctx->vidcon0;
......@@ -570,10 +592,22 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data->enabled = false;
}
static void fimd_wait_for_vblank(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
int ret;
ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
VIDCON1_VSTATUS_VSYNC), 50);
if (ret < 0)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
.mode_set = fimd_win_mode_set,
.commit = fimd_win_commit,
.disable = fimd_win_disable,
.wait_for_vblank = fimd_wait_for_vblank,
};
static struct exynos_drm_manager fimd_manager = {
......@@ -678,7 +712,7 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
return 0;
}
static void fimd_subdrv_remove(struct drm_device *drm_dev)
static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
......@@ -747,16 +781,10 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
writel(val, ctx->regs + SHADOWCON);
}
static int fimd_power_on(struct fimd_context *ctx, bool enable)
static int fimd_clock(struct fimd_context *ctx, bool enable)
{
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
struct device *dev = subdrv->dev;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (enable != false && enable != true)
return -EINVAL;
if (enable) {
int ret;
......@@ -769,18 +797,31 @@ static int fimd_power_on(struct fimd_context *ctx, bool enable)
clk_disable(ctx->bus_clk);
return ret;
}
} else {
clk_disable(ctx->lcd_clk);
clk_disable(ctx->bus_clk);
}
return 0;
}
static int fimd_activate(struct fimd_context *ctx, bool enable)
{
if (enable) {
int ret;
struct device *dev = ctx->subdrv.dev;
ret = fimd_clock(ctx, true);
if (ret < 0)
return ret;
ctx->suspended = false;
/* if vblank was previously enabled, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(dev);
fimd_apply(dev);
} else {
clk_disable(ctx->lcd_clk);
clk_disable(ctx->bus_clk);
fimd_clock(ctx, false);
ctx->suspended = true;
}
......@@ -930,15 +971,15 @@ static int fimd_suspend(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
if (pm_runtime_suspended(dev))
return 0;
/*
* do not use pm_runtime_suspend(). if pm_runtime_suspend() is
* called here, an error would be returned by that interface
* because the usage_count of pm runtime is more than 1.
*/
return fimd_power_on(ctx, false);
if (!pm_runtime_suspended(dev))
return fimd_activate(ctx, false);
return 0;
}
static int fimd_resume(struct device *dev)
......@@ -950,8 +991,21 @@ static int fimd_resume(struct device *dev)
* of pm runtime would still be 1 so in this case, fimd driver
* should be powered on directly, not via the pm runtime interface.
*/
if (!pm_runtime_suspended(dev))
return fimd_power_on(ctx, true);
if (pm_runtime_suspended(dev)) {
int ret;
ret = fimd_activate(ctx, true);
if (ret < 0)
return ret;
/*
* in case of dpms on (standby), the fimd_apply function will
* be called by the encoder's dpms callback to update fimd's
* registers, but in case of a sleep wakeup it is not,
* so fimd_apply() should be called here.
*/
fimd_apply(dev);
}
return 0;
}
......@@ -964,7 +1018,7 @@ static int fimd_runtime_suspend(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
return fimd_power_on(ctx, false);
return fimd_activate(ctx, false);
}
static int fimd_runtime_resume(struct device *dev)
......@@ -973,10 +1027,22 @@ static int fimd_runtime_resume(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
return fimd_power_on(ctx, true);
return fimd_activate(ctx, true);
}
#endif
static struct platform_device_id fimd_driver_ids[] = {
{
.name = "exynos4-fb",
.driver_data = (unsigned long)&exynos4_fimd_driver_data,
}, {
.name = "exynos5-fb",
.driver_data = (unsigned long)&exynos5_fimd_driver_data,
},
{},
};
MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
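This id table is what feeds drm_fimd_get_driver_data() earlier in the patch: the platform device name selects an entry, and its driver_data carries the per-SoC register offset. Roughly (a sketch of the flow, not new code in the commit):

/* on an "exynos5-fb" device this resolves to &exynos5_fimd_driver_data */
struct fimd_driver_data *drv = drm_fimd_get_driver_data(pdev);

/* so the timing registers are written at the exynos5 offset (0x20000) */
writel(val, ctx->regs + drv->timing_base + VIDTCON0);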
static const struct dev_pm_ops fimd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
......@@ -985,6 +1051,7 @@ static const struct dev_pm_ops fimd_pm_ops = {
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = __devexit_p(fimd_remove),
.id_table = fimd_driver_ids,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
......
......@@ -122,6 +122,7 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
pid_t pid;
struct completion complete;
int async;
};
......@@ -164,8 +165,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return -ENOMEM;
}
node = kcalloc(G2D_CMDLIST_NUM, G2D_CMDLIST_NUM * sizeof(*node),
GFP_KERNEL);
node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
if (!node) {
dev_err(dev, "failed to allocate memory\n");
ret = -ENOMEM;
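For context on the fix above: kcalloc(n, size, flags) already allocates a zeroed array of n elements of size bytes, with overflow checking on the multiplication, so the old call was requesting G2D_CMDLIST_NUM times more memory than intended. The correct pattern, as a standalone sketch:

/* zeroed array of G2D_CMDLIST_NUM nodes; kcalloc() itself checks
 * the n * size multiplication for overflow */
node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
if (!node)
	return -ENOMEM;	/* the driver proper unwinds via a goto instead */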
......@@ -679,6 +679,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
}
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
......
......@@ -29,6 +29,11 @@
#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
struct drm_hdmi_context, subdrv);
/* Common hdmi subdrv needs to access the hdmi and mixer through context.
* These should be initialized by the respective drivers */
static struct exynos_drm_hdmi_context *hdmi_ctx;
static struct exynos_drm_hdmi_context *mixer_ctx;
/* these callback points should be set by the specific drivers. */
static struct exynos_hdmi_ops *hdmi_ops;
static struct exynos_mixer_ops *mixer_ops;
......@@ -41,6 +46,18 @@ struct drm_hdmi_context {
bool enabled[MIXER_WIN_NR];
};
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
{
if (ctx)
hdmi_ctx = ctx;
}
void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx)
{
if (ctx)
mixer_ctx = ctx;
}
void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
......@@ -274,10 +291,21 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
ctx->enabled[win] = false;
}
static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (mixer_ops && mixer_ops->wait_for_vblank)
mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
}
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.mode_set = drm_mixer_mode_set,
.commit = drm_mixer_commit,
.disable = drm_mixer_disable,
.wait_for_vblank = drm_mixer_wait_for_vblank,
};
static struct exynos_drm_manager hdmi_manager = {
......@@ -292,46 +320,30 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
{
struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
struct drm_hdmi_context *ctx;
struct platform_device *pdev = to_platform_device(dev);
struct exynos_drm_common_hdmi_pd *pd;
DRM_DEBUG_KMS("%s\n", __FILE__);
pd = pdev->dev.platform_data;
if (!pd) {
DRM_DEBUG_KMS("platform data is null.\n");
return -EFAULT;
}
if (!pd->hdmi_dev) {
DRM_DEBUG_KMS("hdmi device is null.\n");
if (!hdmi_ctx) {
DRM_ERROR("hdmi context not initialized.\n");
return -EFAULT;
}
if (!pd->mixer_dev) {
DRM_DEBUG_KMS("mixer device is null.\n");
if (!mixer_ctx) {
DRM_ERROR("mixer context not initialized.\n");
return -EFAULT;
}
ctx = get_ctx_from_subdrv(subdrv);
ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
to_context(pd->hdmi_dev);
if (!ctx->hdmi_ctx) {
DRM_DEBUG_KMS("hdmi context is null.\n");
if (!ctx) {
DRM_ERROR("no drm hdmi context.\n");
return -EFAULT;
}
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
to_context(pd->mixer_dev);
if (!ctx->mixer_ctx) {
DRM_DEBUG_KMS("mixer context is null.\n");
return -EFAULT;
}
ctx->hdmi_ctx = hdmi_ctx;
ctx->mixer_ctx = mixer_ctx;
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
return 0;
......
......@@ -67,11 +67,14 @@ struct exynos_mixer_ops {
void (*dpms)(void *ctx, int mode);
/* overlay */
void (*wait_for_vblank)(void *ctx);
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
};
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx);
void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
#endif
......@@ -32,6 +32,42 @@ static const uint32_t formats[] = {
DRM_FORMAT_NV12MT,
};
/*
* This function computes the X or Y size actually shown on the screen, given
* the length and start position of the CRTC.
*
* <--- length --->
* CRTC ----------------
* ^ start ^ end
*
* There are six cases, from a to f.
*
* <----- SCREEN ----->
* 0 last
* ----------|------------------|----------
* CRTCs
* a -------
* b -------
* c --------------------------
* d --------
* e -------
* f -------
*/
static int exynos_plane_get_size(int start, unsigned length, unsigned last)
{
int end = start + length;
int size = 0;
if (start <= 0) {
if (end > 0)
size = min_t(unsigned, end, last);
} else if (start <= last) {
size = min_t(unsigned, last - start, length);
}
return size;
}
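Worked values for the six cases in the diagram above, assuming a screen of last = 100 (illustrative numbers only):

/* last = 100 throughout:
 * a: start = -30, length = 20  -> end = -10, size = 0     (fully off the left)
 * b: start = -10, length = 30  -> end = 20,  size = 20    (clipped on the left)
 * c: start = -10, length = 120 -> end = 110, size = 100   (covers the screen)
 * d: start = 20,  length = 30  -> size = min(80, 30) = 30 (fully visible)
 * e: start = 90,  length = 30  -> size = min(10, 30) = 10 (clipped on the right)
 * f: start = 120, length = 20  -> start > last, size = 0  (fully off the right)
 */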
int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
......@@ -47,7 +83,7 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
nr = exynos_drm_format_num_buffers(fb->pixel_format);
nr = exynos_drm_fb_get_buf_cnt(fb);
for (i = 0; i < nr; i++) {
struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
......@@ -64,8 +100,24 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
(unsigned long)overlay->dma_addr[i]);
}
actual_w = min((unsigned)(crtc->mode.hdisplay - crtc_x), crtc_w);
actual_h = min((unsigned)(crtc->mode.vdisplay - crtc_y), crtc_h);
actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay);
if (crtc_x < 0) {
if (actual_w)
src_x -= crtc_x;
else
src_x += crtc_w;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
src_y -= crtc_y;
else
src_y += crtc_h;
crtc_y = 0;
}
/* set drm framebuffer data. */
overlay->fb_x = src_x;
......
......@@ -56,6 +56,7 @@ struct vidi_context {
unsigned int connected;
bool vblank_on;
bool suspended;
bool direct_vblank;
struct work_struct work;
struct mutex lock;
};
......@@ -224,6 +225,15 @@ static int vidi_enable_vblank(struct device *dev)
if (!test_and_set_bit(0, &ctx->irq_flags))
ctx->vblank_on = true;
ctx->direct_vblank = true;
/*
* in case of a page flip request, vidi_finish_pageflip() will not
* be called here because direct_vblank is true; it will instead be
* called from the overlay_ops->commit callback
*/
schedule_work(&ctx->work);
return 0;
}
......@@ -425,7 +435,17 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
/* refresh rate is about 50Hz. */
usleep_range(16000, 20000);
mutex_lock(&ctx->lock);
if (ctx->direct_vblank) {
drm_handle_vblank(subdrv->drm_dev, manager->pipe);
ctx->direct_vblank = false;
mutex_unlock(&ctx->lock);
return;
}
mutex_unlock(&ctx->lock);
vidi_finish_pageflip(subdrv->drm_dev, manager->pipe);
}
......@@ -453,7 +473,7 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
return 0;
}
static void vidi_subdrv_remove(struct drm_device *drm_dev)
static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
......
......@@ -32,6 +32,9 @@
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <plat/gpio-cfg.h>
#include <drm/exynos_drm.h>
......@@ -40,10 +43,18 @@
#include "exynos_hdmi.h"
#include <linux/gpio.h>
#include <media/s5p_hdmi.h>
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
};
struct hdmi_resources {
struct clk *hdmi;
struct clk *sclk_hdmi;
......@@ -59,13 +70,12 @@ struct hdmi_context {
struct drm_device *drm_dev;
bool hpd;
bool powered;
bool is_v13;
bool dvi_mode;
struct mutex hdmi_mutex;
void __iomem *regs;
unsigned int external_irq;
unsigned int internal_irq;
int external_irq;
int internal_irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
......@@ -76,8 +86,9 @@ struct hdmi_context {
struct hdmi_resources res;
void *parent_ctx;
void (*cfg_hpd)(bool external);
int (*get_hpd)(void);
int hpd_gpio;
enum hdmi_type type;
};
/* HDMI Version 1.3 */
......@@ -1209,7 +1220,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
{
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
hdmi_v13_regs_dump(hdata, prefix);
else
hdmi_v14_regs_dump(hdata, prefix);
......@@ -1250,7 +1261,7 @@ static int hdmi_v14_conf_index(struct drm_display_mode *mode)
static int hdmi_conf_index(struct hdmi_context *hdata,
struct drm_display_mode *mode)
{
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
return hdmi_v13_conf_index(mode);
return hdmi_v14_conf_index(mode);
......@@ -1346,7 +1357,7 @@ static int hdmi_check_timing(void *ctx, void *timing)
check_timing->yres, check_timing->refresh,
check_timing->vmode);
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
return hdmi_v13_check_timing(check_timing);
else
return hdmi_v14_check_timing(check_timing);
......@@ -1412,7 +1423,7 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
else
hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
......@@ -1516,7 +1527,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
{
u32 reg;
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
reg = HDMI_V13_CORE_RSTOUT;
else
reg = HDMI_CORE_RSTOUT;
......@@ -1530,12 +1541,9 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
/* enable HPD interrupts */
/* disable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
mdelay(10);
hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
/* choose HDMI mode */
hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
......@@ -1551,7 +1559,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
}
if (hdata->is_v13) {
if (hdata->type == HDMI_TYPE13) {
/* choose bluescreen (fecal) color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
......@@ -1833,7 +1841,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
static void hdmi_timing_apply(struct hdmi_context *hdata)
{
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
hdmi_v13_timing_apply(hdata);
else
hdmi_v14_timing_apply(hdata);
......@@ -1855,7 +1863,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
if (hdata->hdmiphy_port)
i2c_master_send(hdata->hdmiphy_port, buffer, 2);
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
reg = HDMI_V13_PHY_RSTOUT;
else
reg = HDMI_PHY_RSTOUT;
......@@ -1882,7 +1890,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
}
/* pixel clock */
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
else
hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data;
......@@ -1950,7 +1958,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
drm_mode_set_crtcinfo(adjusted_mode, 0);
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(adjusted_mode);
else
index = hdmi_v14_conf_index(adjusted_mode);
......@@ -1964,7 +1972,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
* to adjusted_mode.
*/
list_for_each_entry(m, &connector->modes, head) {
if (hdata->is_v13)
if (hdata->type == HDMI_TYPE13)
index = hdmi_v13_conf_index(m);
else
index = hdmi_v14_conf_index(m);
......@@ -2024,8 +2032,6 @@ static void hdmi_poweron(struct hdmi_context *hdata)
hdata->powered = true;
if (hdata->cfg_hpd)
hdata->cfg_hpd(true);
mutex_unlock(&hdata->hdmi_mutex);
pm_runtime_get_sync(hdata->dev);
......@@ -2061,8 +2067,6 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
pm_runtime_put_sync(hdata->dev);
mutex_lock(&hdata->hdmi_mutex);
if (hdata->cfg_hpd)
hdata->cfg_hpd(false);
hdata->powered = false;
......@@ -2110,17 +2114,13 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
if (!hdata->get_hpd)
goto out;
mutex_lock(&hdata->hdmi_mutex);
hdata->hpd = hdata->get_hpd();
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
mutex_unlock(&hdata->hdmi_mutex);
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
out:
return IRQ_HANDLED;
}
......@@ -2143,18 +2143,9 @@ static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
HDMI_INTC_FLAG_HPD_PLUG);
}
mutex_lock(&hdata->hdmi_mutex);
hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
if (hdata->powered && hdata->hpd) {
mutex_unlock(&hdata->hdmi_mutex);
goto out;
}
mutex_unlock(&hdata->hdmi_mutex);
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
out:
return IRQ_HANDLED;
}
......@@ -2262,18 +2253,89 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
hdmi_hdmiphy = hdmiphy;
}
#ifdef CONFIG_OF
static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
(struct device *dev)
{
struct device_node *np = dev->of_node;
struct s5p_hdmi_platform_data *pd;
enum of_gpio_flags flags;
u32 value;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
if (!pd) {
DRM_ERROR("memory allocation for pdata failed\n");
goto err_data;
}
if (!of_find_property(np, "hpd-gpio", &value)) {
DRM_ERROR("no hpd gpio property found\n");
goto err_data;
}
pd->hpd_gpio = of_get_named_gpio_flags(np, "hpd-gpio", 0, &flags);
return pd;
err_data:
return NULL;
}
#else
static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
(struct device *dev)
{
return NULL;
}
#endif
static struct platform_device_id hdmi_driver_types[] = {
{
.name = "s5pv210-hdmi",
.driver_data = HDMI_TYPE13,
}, {
.name = "exynos4-hdmi",
.driver_data = HDMI_TYPE13,
}, {
.name = "exynos4-hdmi14",
.driver_data = HDMI_TYPE14,
}, {
.name = "exynos5-hdmi",
.driver_data = HDMI_TYPE14,
}, {
/* end node */
}
};
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
.data = (void *)HDMI_TYPE14,
}, {
/* end node */
}
};
static int __devinit hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
struct hdmi_context *hdata;
struct exynos_drm_hdmi_pdata *pdata;
struct s5p_hdmi_platform_data *pdata;
struct resource *res;
int ret;
DRM_DEBUG_KMS("[%d]\n", __LINE__);
if (pdev->dev.of_node) {
pdata = drm_hdmi_dt_parse_pdata(dev);
if (IS_ERR(pdata)) {
DRM_ERROR("failed to parse dt\n");
return PTR_ERR(pdata);
}
} else {
pdata = pdev->dev.platform_data;
}
if (!pdata) {
DRM_ERROR("no platform data specified\n");
return -EINVAL;
......@@ -2300,18 +2362,33 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm_hdmi_ctx);
hdata->is_v13 = pdata->is_v13;
hdata->cfg_hpd = pdata->cfg_hpd;
hdata->get_hpd = pdata->get_hpd;
if (dev->of_node) {
const struct of_device_id *match;
match = of_match_node(of_match_ptr(hdmi_match_types),
pdev->dev.of_node);
hdata->type = (enum hdmi_type)match->data;
} else {
hdata->type = (enum hdmi_type)platform_get_device_id
(pdev)->driver_data;
}
hdata->hpd_gpio = pdata->hpd_gpio;
hdata->dev = dev;
ret = hdmi_resources_init(hdata);
if (ret) {
ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
goto err_data;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
ret = -ENOENT;
goto err_resource;
}
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
......@@ -2320,11 +2397,17 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
goto err_resource;
}
ret = gpio_request(hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
goto err_resource;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
ret = -ENOENT;
goto err_resource;
goto err_gpio;
}
hdata->ddc_port = hdmi_ddc;
......@@ -2338,32 +2421,31 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
hdata->external_irq = platform_get_irq_byname(pdev, "external_irq");
hdata->external_irq = gpio_to_irq(hdata->hpd_gpio);
if (hdata->external_irq < 0) {
DRM_ERROR("failed to get platform irq\n");
DRM_ERROR("failed to get GPIO external irq\n");
ret = hdata->external_irq;
goto err_hdmiphy;
}
hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq");
hdata->internal_irq = platform_get_irq(pdev, 0);
if (hdata->internal_irq < 0) {
DRM_ERROR("failed to get platform internal irq\n");
ret = hdata->internal_irq;
goto err_hdmiphy;
}
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
ret = request_threaded_irq(hdata->external_irq, NULL,
hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi_external", drm_hdmi_ctx);
if (ret) {
DRM_ERROR("failed to register hdmi internal interrupt\n");
DRM_ERROR("failed to register hdmi external interrupt\n");
goto err_hdmiphy;
}
if (hdata->cfg_hpd)
hdata->cfg_hpd(false);
ret = request_threaded_irq(hdata->internal_irq, NULL,
hdmi_internal_irq_thread, IRQF_ONESHOT,
"hdmi_internal", drm_hdmi_ctx);
......@@ -2372,6 +2454,9 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
goto err_free_irq;
}
/* Attach HDMI Driver to common hdmi. */
exynos_hdmi_drv_attach(drm_hdmi_ctx);
/* register specific callbacks to common hdmi. */
exynos_hdmi_ops_register(&hdmi_ops);
......@@ -2385,6 +2470,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
err_gpio:
gpio_free(hdata->hpd_gpio);
err_resource:
hdmi_resources_cleanup(hdata);
err_data:
......@@ -2402,6 +2489,9 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
free_irq(hdata->internal_irq, hdata);
free_irq(hdata->external_irq, hdata);
gpio_free(hdata->hpd_gpio);
hdmi_resources_cleanup(hdata);
......@@ -2447,9 +2537,11 @@ static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = __devexit_p(hdmi_remove),
.id_table = hdmi_driver_types,
.driver = {
.name = "exynos4-hdmi",
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
.of_match_table = hdmi_match_types,
},
};
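Because the renamed driver now carries both an id_table and an of_match_table in addition to driver.name, it can bind to device-tree nodes ("samsung,exynos5-hdmi") as well as to devices created by legacy board files; a sketch of the latter (hypothetical board code; hdmi_res is an assumed resource array):

struct platform_device *pdev;

/* non-DT platforms can still instantiate the device under an id-table name */
pdev = platform_device_register_simple("exynos5-hdmi", -1,
				       hdmi_res, ARRAY_SIZE(hdmi_res));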
......@@ -42,13 +42,23 @@ static int hdmiphy_remove(struct i2c_client *client)
static const struct i2c_device_id hdmiphy_id[] = {
{ "s5p_hdmiphy", 0 },
{ "exynos5-hdmiphy", 0 },
{ },
};
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
}, {
/* end node */
}
};
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "s5p-hdmiphy",
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
.of_match_table = hdmiphy_match_types,
},
.id_table = hdmiphy_id,
.probe = hdmiphy_probe,
......
......@@ -73,16 +73,28 @@ struct mixer_resources {
struct clk *sclk_dac;
};
enum mixer_version_id {
MXR_VER_0_0_0_16,
MXR_VER_16_0_33_0,
};
struct mixer_context {
struct device *dev;
int pipe;
bool interlace;
bool powered;
bool vp_enabled;
u32 int_en;
struct mutex mixer_mutex;
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
};
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
};
static const u8 filter_y_horiz_tap8[] = {
......@@ -251,6 +263,7 @@ static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
mixer_reg_writemask(res, MXR_STATUS, enable ?
MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
if (ctx->vp_enabled)
vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
VP_SHADOW_UPDATE_ENABLE : 0);
}
......@@ -333,8 +346,11 @@ static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
break;
case 2:
if (ctx->vp_enabled) {
vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE);
mixer_reg_writemask(res, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
}
break;
}
}
......@@ -465,6 +481,18 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_regs_dump(ctx);
}
static void mixer_layer_update(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
u32 val;
val = mixer_reg_read(res, MXR_CFG);
/* allow one update per vsync only */
if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
......@@ -545,6 +573,11 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
mixer_cfg_scan(ctx, win_data->mode_height);
mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
/* layer update mandatory for mixer 16.0.33.0 */
if (ctx->mxr_ver == MXR_VER_16_0_33_0)
mixer_layer_update(ctx);
mixer_run(ctx);
mixer_vsync_set_update(ctx, true);
......@@ -592,6 +625,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
*/
val = MXR_LAYER_CFG_GRP1_VAL(3);
val |= MXR_LAYER_CFG_GRP0_VAL(2);
if (ctx->vp_enabled)
val |= MXR_LAYER_CFG_VP_VAL(1);
mixer_reg_write(res, MXR_LAYER_CFG, val);
......@@ -615,13 +649,16 @@ static void mixer_win_reset(struct mixer_context *ctx)
val = MXR_GRP_CFG_ALPHA_VAL(0);
mixer_reg_write(res, MXR_VIDEO_CFG, val);
if (ctx->vp_enabled) {
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(res);
}
/* disable all layers */
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
if (ctx->vp_enabled)
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
mixer_vsync_set_update(ctx, true);
......@@ -645,8 +682,10 @@ static void mixer_poweron(struct mixer_context *ctx)
pm_runtime_get_sync(ctx->dev);
clk_enable(res->mixer);
if (ctx->vp_enabled) {
clk_enable(res->vp);
clk_enable(res->sclk_mixer);
}
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
......@@ -666,8 +705,10 @@ static void mixer_poweroff(struct mixer_context *ctx)
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
clk_disable(res->mixer);
if (ctx->vp_enabled) {
clk_disable(res->vp);
clk_disable(res->sclk_mixer);
}
pm_runtime_put_sync(ctx->dev);
......@@ -726,6 +767,18 @@ static void mixer_dpms(void *ctx, int mode)
}
}
static void mixer_wait_for_vblank(void *ctx)
{
struct mixer_context *mixer_ctx = ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
int ret;
ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
MXR_INT_STATUS_VSYNC), 50);
if (ret < 0)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
......@@ -788,7 +841,7 @@ static void mixer_win_commit(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
if (win > 1)
if (win > 1 && mixer_ctx->vp_enabled)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
......@@ -818,6 +871,7 @@ static struct exynos_mixer_ops mixer_ops = {
.dpms = mixer_dpms,
/* overlay */
.wait_for_vblank = mixer_wait_for_vblank,
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
......@@ -923,39 +977,20 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
ret = -ENODEV;
goto fail;
}
mixer_res->vp = clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
ret = -ENODEV;
goto fail;
}
mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
ret = -ENODEV;
goto fail;
}
mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
ret = -ENODEV;
goto fail;
}
mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
ret = -ENODEV;
goto fail;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
ret = -ENXIO;
goto fail;
}
clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
......@@ -964,57 +999,126 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
goto fail;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
dev_err(dev, "get interrupt resource failed.\n");
ret = -ENXIO;
goto fail;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
ret = -ENXIO;
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
goto fail;
}
mixer_res->irq = res->start;
return 0;
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
fail:
if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
clk_put(mixer_res->sclk_hdmi);
if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer);
return ret;
}
static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
struct platform_device *pdev)
{
struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
int ret;
mixer_res->vp = clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
ret = -ENODEV;
goto fail;
}
mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
ret = -ENODEV;
goto fail;
}
mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
ret = -ENODEV;
goto fail;
}
if (mixer_res->sclk_hdmi)
clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
dev_err(dev, "get memory resource failed.\n");
ret = -ENXIO;
goto fail;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
ret = -ENXIO;
goto fail;
}
mixer_res->irq = res->start;
return 0;
fail:
if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
clk_put(mixer_res->sclk_dac);
if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
clk_put(mixer_res->sclk_hdmi);
if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
clk_put(mixer_res->sclk_mixer);
if (!IS_ERR_OR_NULL(mixer_res->vp))
clk_put(mixer_res->vp);
if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer);
return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
.version = MXR_VER_16_0_33_0,
.is_vp_enabled = 0,
};
static struct mixer_drv_data exynos4_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
};
static struct platform_device_id mixer_driver_types[] = {
{
.name = "s5p-mixer",
.driver_data = (unsigned long)&exynos4_mxr_drv_data,
}, {
.name = "exynos5-mixer",
.driver_data = (unsigned long)&exynos5_mxr_drv_data,
}, {
/* end node */
}
};
static struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos5-mixer",
.data = &exynos5_mxr_drv_data,
}, {
/* end node */
}
};
static int __devinit mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
struct mixer_context *ctx;
struct mixer_drv_data *drv;
int ret;
dev_info(dev, "probe start\n");
......@@ -1034,15 +1138,41 @@ static int __devinit mixer_probe(struct platform_device *pdev)
mutex_init(&ctx->mixer_mutex);
if (dev->of_node) {
const struct of_device_id *match;
match = of_match_node(of_match_ptr(mixer_match_types),
pdev->dev.of_node);
drv = match->data;
} else {
drv = (struct mixer_drv_data *)
platform_get_device_id(pdev)->driver_data;
}
ctx->dev = &pdev->dev;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
platform_set_drvdata(pdev, drm_hdmi_ctx);
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(drm_hdmi_ctx, pdev);
if (ret)
if (ret) {
DRM_ERROR("mixer_resources_init failed\n");
goto fail;
}
if (ctx->vp_enabled) {
/* acquire vp resources: regs, irqs, clocks */
ret = vp_resources_init(drm_hdmi_ctx, pdev);
if (ret) {
DRM_ERROR("vp_resources_init failed\n");
goto fail;
}
}
/* attach mixer driver to common hdmi. */
exynos_mixer_drv_attach(drm_hdmi_ctx);
/* register specific callback point to common hdmi. */
exynos_mixer_ops_register(&mixer_ops);
......@@ -1082,10 +1212,12 @@ static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
struct platform_driver mixer_driver = {
.driver = {
.name = "s5p-mixer",
.name = "exynos-mixer",
.owner = THIS_MODULE,
.pm = &mixer_pm_ops,
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
.remove = __devexit_p(mixer_remove),
.id_table = mixer_driver_types,
};
......@@ -69,6 +69,7 @@
(((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
/* bits for MXR_STATUS */
#define MXR_STATUS_SOFT_RESET (1 << 8)
#define MXR_STATUS_16_BURST (1 << 7)
#define MXR_STATUS_BURST_MASK (1 << 7)
#define MXR_STATUS_BIG_ENDIAN (1 << 3)
......@@ -77,6 +78,8 @@
#define MXR_STATUS_REG_RUN (1 << 0)
/* bits for MXR_CFG */
#define MXR_CFG_LAYER_UPDATE (1 << 31)
#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
#define MXR_CFG_RGB601_0_255 (0 << 9)
#define MXR_CFG_RGB601_16_235 (1 << 9)
#define MXR_CFG_RGB709_0_255 (2 << 9)
......
......@@ -1399,10 +1399,16 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
case 0:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
/*
* EBUSY is ok: this just means that another thread
* already did the job.
*/
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
default:
WARN_ON_ONCE(ret);
return VM_FAULT_SIGBUS;
}
}
......@@ -3217,10 +3223,6 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
enum i915_cache_level level;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
switch (args->caching) {
case I915_CACHING_NONE:
level = I915_CACHE_NONE;
......@@ -3232,6 +3234,10 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
......
......@@ -328,7 +328,7 @@ mi_set_context(struct intel_ring_buffer *ring,
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
ret = ring->flush(ring, 0, 0);
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
......
......@@ -91,7 +91,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 6) {
if (IS_VALLEYVIEW(dev)) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
......
......@@ -697,12 +697,12 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
intel_opregion_gse_intr(dev);
for (i = 0; i < 3; i++) {
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
drm_handle_vblank(dev, i);
if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
drm_handle_vblank(dev, i);
}
/* check event from PCH */
......@@ -784,6 +784,12 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
if (de_iir & DE_PIPEA_VBLANK)
drm_handle_vblank(dev, 0);
if (de_iir & DE_PIPEB_VBLANK)
drm_handle_vblank(dev, 1);
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
......@@ -794,12 +800,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
if (de_iir & DE_PIPEA_VBLANK)
drm_handle_vblank(dev, 0);
if (de_iir & DE_PIPEB_VBLANK)
drm_handle_vblank(dev, 1);
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
......
......@@ -527,6 +527,9 @@
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
#define GEN6_GT_MODE 0x20d0
#define GEN6_GT_MODE_HI (1 << 9)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
......
......@@ -2806,13 +2806,34 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
bool pending;
if (atomic_read(&dev_priv->mm.wedged))
return false;
spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
return pending;
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (crtc->fb == NULL)
return;
wait_event(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc));
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->fb);
mutex_unlock(&dev->struct_mutex);
......@@ -4370,7 +4391,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
......@@ -4802,7 +4823,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
target_clock = adjusted_mode->clock;
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
adjusted_mode);
if (is_lvds && dev_priv->lvds_dither)
dither = true;
......@@ -6159,15 +6181,13 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
struct timeval tnow, tvbl;
struct timeval tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
do_gettimeofday(&tnow);
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
......@@ -6181,25 +6201,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
e = work->event;
e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
/* Called before vblank count and timestamps have
* been updated for the vblank interval of flip
* completion? Need to increment vblank count and
* add one videorefresh duration to returned timestamp
* to account for this. We assume this happened if we
* get called over 0.9 frame durations after the last
* timestamped vblank.
*
* This calculation cannot be used with vrefresh rates
* below 5Hz (10Hz to be on the safe side) without
* promoting to 64-bit integers.
*/
if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
9 * crtc->framedur_ns) {
e->event.sequence++;
tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
crtc->framedur_ns);
}
e->event.tv_sec = tvbl.tv_sec;
e->event.tv_usec = tvbl.tv_usec;
......@@ -6216,9 +6217,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
......
......@@ -36,6 +36,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
......@@ -1796,8 +1797,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count && voltage_tries == 5) {
++loop_tries;
if (loop_tries == 5) {
if (++loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
break;
}
......@@ -1807,15 +1807,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
}
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
if (voltage_tries == 5) {
DRM_DEBUG_KMS("too many voltage retries, give up\n");
break;
}
} else
voltage_tries = 0;
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
voltage_tries = 0;
} else
++voltage_tries;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
......@@ -1963,12 +1959,25 @@ static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) &&
(intel_dp->dpcd[DP_DPCD_REV] != 0)) {
return true;
}
sizeof(intel_dp->dpcd)) == 0)
return false; /* aux transfer failed */
return false;
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
return true; /* no per-port downstream info */
if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
intel_dp->downstream_ports,
DP_MAX_DOWNSTREAM_PORTS) == 0)
return false; /* downstream port status fetch failed */
return true;
}
static void
......@@ -2068,11 +2077,43 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
}
}
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
if (intel_dp_get_dpcd(intel_dp))
uint8_t *dpcd = intel_dp->dpcd;
bool hpd;
uint8_t type;
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
/* if there's no downstream port, we're done */
if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
if (hpd) {
uint8_t reg;
if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
&reg, 1))
return connector_status_unknown;
return DP_GET_SINK_COUNT(reg) ? connector_status_connected
: connector_status_disconnected;
}
/* If no HPD, poke DDC gently */
if (drm_probe_ddc(&intel_dp->adapter))
return connector_status_connected;
/* Well we tried, say unknown for unreliable port types */
type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
return connector_status_unknown;
/* Anything else is out of spec, warn and ignore */
DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
......
......@@ -332,6 +332,7 @@ struct intel_hdmi {
};
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
......@@ -346,6 +347,7 @@ struct intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
......
......@@ -3474,6 +3474,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
/* The default value should be 0x200 according to docs, but the two
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
}
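GEN6_GT_MODE is a masked register: the upper 16 bits of each write are a per-bit write enable for the lower 16 bits, so no read-modify-write cycle is needed. A sketch of the masked-bit helpers as commonly defined in i915_reg.h (shown as an assumption, not quoted from this tree):

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) */

	/* The two writes above thus first clear all 16 low bits, then set
	 * only GEN6_GT_MODE_HI (bit 9), with just that bit's mask raised. */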
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
......
header-y += drm.h
header-y += drm_fourcc.h
header-y += drm_mode.h
header-y += drm_sarea.h
header-y += exynos_drm.h
header-y += i810_drm.h
header-y += i915_drm.h
header-y += mga_drm.h
header-y += nouveau_drm.h
header-y += r128_drm.h
header-y += radeon_drm.h
header-y += savage_drm.h
header-y += sis_drm.h
header-y += via_drm.h
header-y += vmwgfx_drm.h
......@@ -878,6 +878,7 @@ extern char *drm_get_tv_subconnector_name(int val);
extern char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
......
......@@ -26,7 +26,19 @@
#include <linux/types.h>
#include <linux/i2c.h>
/* From the VESA DisplayPort spec */
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
* DP and DPCD versions are independent. Differences from 1.0 are not noted,
* 1.0 devices basically don't exist in the wild.
*
* Abbreviations, in chronological order:
*
* eDP: Embedded DisplayPort version 1
* DPI: DisplayPort Interoperability Guideline v1.1a
* 1.2: DisplayPort 1.2
*
* 1.2 formally includes both eDP and DPI definitions.
*/
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
......@@ -53,7 +65,7 @@
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
# define DP_TPS3_SUPPORTED (1 << 6)
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
#define DP_MAX_DOWNSPREAD 0x003
......@@ -69,19 +81,33 @@
/* 10b = TMDS or HDMI */
/* 11b = Other */
# define DP_FORMAT_CONVERSION (1 << 3)
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
#define DP_DOWN_STREAM_PORT_COUNT 0x007
#define DP_PORT_COUNT_MASK 0x0f
#define DP_OUI_SUPPORT (1 << 7)
#define DP_EDP_CONFIGURATION_CAP 0x00d
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
#define DP_PSR_SUPPORT 0x070
# define DP_PORT_COUNT_MASK 0x0f
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
# define DP_I2C_SPEED_10K 0x04
# define DP_I2C_SPEED_100K 0x08
# define DP_I2C_SPEED_400K 0x10
# define DP_I2C_SPEED_1M 0x20
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
/* Multiple stream transport */
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
#define DP_PSR_CAPS 0x071
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
# define DP_PSR_SETUP_TIME_275 (1 << 1)
......@@ -93,11 +119,36 @@
# define DP_PSR_SETUP_TIME_MASK (7 << 1)
# define DP_PSR_SETUP_TIME_SHIFT 1
/*
* 0x80-0x8f describe downstream port capabilities, but there are two layouts
* based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
* each port's descriptor is one byte wide. If it was set, each port's is
* four bytes wide, starting with the one byte from the base info. As of
* DP interop v1.1a only VGA defines additional detail.
*/
/* offset 0 */
#define DP_DOWNSTREAM_PORT_0 0x80
# define DP_DS_PORT_TYPE_MASK (7 << 0)
# define DP_DS_PORT_TYPE_DP 0
# define DP_DS_PORT_TYPE_VGA 1
# define DP_DS_PORT_TYPE_DVI 2
# define DP_DS_PORT_TYPE_HDMI 3
# define DP_DS_PORT_TYPE_NON_EDID 4
# define DP_DS_PORT_HPD (1 << 3)
/* offset 1 for VGA is maximum megapixels per second / 8 */
/* offset 2 */
# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
# define DP_DS_VGA_8BPC 0
# define DP_DS_VGA_10BPC 1
# define DP_DS_VGA_12BPC 2
# define DP_DS_VGA_16BPC 3
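A hypothetical helper illustrating how a parser would choose the descriptor stride for the 0x80-0x8f block described above (dp_ds_port_stride is an illustrative name, not an existing kernel function):

	/* one byte per port descriptor normally, four when the branch
	 * advertises detailed capability info */
	static int dp_ds_port_stride(const uint8_t dpcd[])
	{
		return (dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DETAILED_CAP_INFO_AVAILABLE) ? 4 : 1;
	}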
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
......@@ -107,7 +158,7 @@
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
# define DP_TRAINING_PATTERN_3 3
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
......@@ -148,24 +199,38 @@
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
#define DP_PSR_EN_CFG 0x170
#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
/* bitmask as for DP_I2C_SPEED_CAP */
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
# define DP_SINK_CP_READY (1 << 6)
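Worked example: a DP 1.2 branch reporting a raw SINK_COUNT of 0x81 has bit 7 set (the count MSB) and bits 5:0 equal to 1, so DP_GET_SINK_COUNT(0x81) = (0x80 >> 1) | 0x01 = 65 sinks; bit 6 is spliced out of the count because it still carries DP_SINK_CP_READY.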
#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
# define DP_CP_IRQ (1 << 2)
# define DP_SINK_SPECIFIC_IRQ (1 << 6)
#define DP_EDP_CONFIGURATION_SET 0x10a
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
# define DP_LANE_CR_DONE (1 << 0)
......@@ -225,14 +290,14 @@
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
#define DP_PSR_ERROR_STATUS 0x2006
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
#define DP_PSR_ESI 0x2007
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
#define DP_PSR_STATUS 0x2008
#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
# define DP_PSR_SINK_INACTIVE 0
# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
# define DP_PSR_SINK_ACTIVE_RFB 2
......
......@@ -25,182 +25,10 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_H_
#define _EXYNOS_DRM_H_
#include <drm/drm.h>
/**
* User-desired buffer creation information structure.
*
* @size: user-desired memory allocation size.
* - this size value would be page-aligned internally.
* @flags: user request for setting memory type or cache attributes.
* @handle: a returned handle to the created gem object.
* - this handle will be set by the gem module on the kernel side.
*/
struct drm_exynos_gem_create {
uint64_t size;
unsigned int flags;
unsigned int handle;
};
/**
* A structure for getting buffer offset.
*
* @handle: a handle to the gem object created.
* @pad: just padding to be 64-bit aligned.
* @offset: relative offset value of the allocated memory region.
* - this value should be set by user.
*/
struct drm_exynos_gem_map_off {
unsigned int handle;
unsigned int pad;
uint64_t offset;
};
/**
* A structure for mapping buffer.
*
* @handle: a handle to gem object created.
* @pad: just padding to be 64-bit aligned.
* @size: memory size to be mapped.
* @mapped: the resulting user virtual address of the mapping.
* - this variable is filled by the exynos gem module
* on the kernel side with the user virtual address allocated
* by do_mmap().
*/
struct drm_exynos_gem_mmap {
unsigned int handle;
unsigned int pad;
uint64_t size;
uint64_t mapped;
};
/**
* A structure for gem information.
*
* @handle: a handle to gem object created.
* @flags: flag value including memory type and cache attributes;
* this value is set by the driver.
* @size: size of the memory region allocated by gem; this size is
* set by the driver.
*/
struct drm_exynos_gem_info {
unsigned int handle;
unsigned int flags;
uint64_t size;
};
/**
* A structure for user connection request of virtual display.
*
* @connection: indicates whether the user requests a connection.
* @extensions: if this value is 1 then the vidi driver needs an additional
* 128 bytes of edid data.
* @edid: the edid data pointer from user side.
*/
struct drm_exynos_vidi_connection {
unsigned int connection;
unsigned int extensions;
uint64_t edid;
};
/* memory type definitions. */
enum e_drm_exynos_gem_mem_type {
/* Physically contiguous memory, used as default. */
EXYNOS_BO_CONTIG = 0 << 0,
/* Physically non-contiguous memory. */
EXYNOS_BO_NONCONTIG = 1 << 0,
/* non-cacheable mapping, used as default. */
EXYNOS_BO_NONCACHABLE = 0 << 1,
/* cacheable mapping. */
EXYNOS_BO_CACHABLE = 1 << 1,
/* write-combine mapping. */
EXYNOS_BO_WC = 1 << 2,
EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE |
EXYNOS_BO_WC
};
struct drm_exynos_g2d_get_ver {
__u32 major;
__u32 minor;
};
struct drm_exynos_g2d_cmd {
__u32 offset;
__u32 data;
};
enum drm_exynos_g2d_event_type {
G2D_EVENT_NOT,
G2D_EVENT_NONSTOP,
G2D_EVENT_STOP, /* not yet */
};
struct drm_exynos_g2d_set_cmdlist {
__u64 cmd;
__u64 cmd_gem;
__u32 cmd_nr;
__u32 cmd_gem_nr;
/* for g2d event */
__u64 event_type;
__u64 user_data;
};
struct drm_exynos_g2d_exec {
__u64 async;
};
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_GEM_GET 0x04
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
/* G2D */
#define DRM_EXYNOS_G2D_GET_VER 0x20
#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
#define DRM_EXYNOS_G2D_EXEC 0x22
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
#define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off)
#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
#define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver)
#define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist)
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
struct drm_exynos_g2d_event {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 cmdlist_no;
__u32 reserved;
};
#ifdef __KERNEL__
#include <uapi/drm/exynos_drm.h>
/**
* A structure for lcd panel information.
......@@ -257,5 +85,4 @@ struct exynos_drm_hdmi_pdata {
int (*get_hpd)(void);
};
#endif /* __KERNEL__ */
#endif /* _EXYNOS_DRM_H_ */
......@@ -23,933 +23,15 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
#include <drm/drm.h>
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
#include <uapi/drm/i915_drm.h>
#ifdef __KERNEL__
/* For use by IPS driver */
extern unsigned long i915_read_mch_val(void);
extern bool i915_gpu_raise(void);
extern bool i915_gpu_lower(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_turbo_disable(void);
#endif
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
* of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
typedef struct _drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
I915_RESUME_DMA = 0x03
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
unsigned int back_pitch;
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
} drm_i915_init_t;
typedef struct _drm_i915_sarea {
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int ctxOwner; /* last context to upload state */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
int width, height; /* screen size in pixels */
drm_handle_t front_handle;
int front_offset;
int front_size;
drm_handle_t back_handle;
int back_offset;
int back_size;
drm_handle_t depth_handle;
int depth_offset;
int depth_size;
drm_handle_t tex_handle;
int tex_offset;
int tex_size;
int log_tex_granularity;
int pitch;
int rotation; /* 0, 90, 180 or 270 */
int rotated_offset;
int rotated_size;
int rotated_pitch;
int virtualX, virtualY;
unsigned int front_tiled;
unsigned int back_tiled;
unsigned int depth_tiled;
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
/* fill out some space for old userspace triple buffer */
drm_handle_t unused_handle;
__u32 unused1, unused2, unused3;
/* buffer object handles for static buffers. May change
* over the lifetime of the client.
*/
__u32 front_bo_handle;
__u32 back_bo_handle;
__u32 unused_bo_handle;
__u32 depth_bo_handle;
} drm_i915_sarea_t;
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
#define I915_BOX_FLIP 0x2
#define I915_BOX_WAIT 0x4
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
/* I915 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
* validated by the kernel prior to sending to hardware.
*/
typedef struct _drm_i915_cmdbuffer {
char __user *buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irq's:
*/
typedef struct drm_i915_irq_emit {
int __user *irq_seq;
} drm_i915_irq_emit_t;
typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
typedef struct drm_i915_getparam {
int param;
int __user *value;
} drm_i915_getparam_t;
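A hypothetical userspace sketch of the getparam round trip (fd is an assumed, already-open DRM device node; headers and error handling elided):

	int chipset_id = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &chipset_id,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("PCI device id: 0x%04x\n", chipset_id);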
/* Ioctl to set kernel params:
*/
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
typedef struct drm_i915_setparam {
int param;
int value;
} drm_i915_setparam_t;
/* A memory manager for regions of shared memory:
*/
#define I915_MEM_REGION_AGP 1
typedef struct drm_i915_mem_alloc {
int region;
int alignment;
int size;
int __user *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
typedef struct drm_i915_mem_free {
int region;
int region_offset;
} drm_i915_mem_free_t;
typedef struct drm_i915_mem_init_heap {
int region;
int size;
int start;
} drm_i915_mem_init_heap_t;
/* Allow memory manager to be torn down and re-initialized (eg on
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
int region;
} drm_i915_mem_destroy_heap_t;
/* Allow X server to configure which pipes to monitor for vblank signals
*/
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2
typedef struct drm_i915_vblank_pipe {
int pipe;
} drm_i915_vblank_pipe_t;
/* Schedule buffer swap at given vertical blank:
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
typedef struct drm_i915_hws_addr {
__u64 addr;
} drm_i915_hws_addr_t;
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_end;
};
struct drm_i915_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
__u64 size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
__u32 pad;
};
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
__u32 pad;
/** Offset into the object to read from */
__u64 offset;
/** Length of data to read */
__u64 size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
__u32 handle;
__u32 pad;
/** Offset into the object to write to */
__u64 offset;
/** Length of data to write */
__u64 size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/** Offset in the object to map. */
__u64 offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
__u64 size;
/**
* Returned pointer the data was mapped at.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
};
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
/** New read domains */
__u32 read_domains;
/** New write domain */
__u32 write_domain;
};
struct drm_i915_gem_sw_finish {
/** Handle for the object */
__u32 handle;
};
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
* It's appealing to make this an index into the mm_validate_entry
* list to refer to the buffer, but this allows the driver to create
* a relocation list for state buffers and not re-write it per
* exec using the buffer.
*/
__u32 target_handle;
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
__u32 delta;
/** Offset in the buffer the relocation entry will be written into */
__u64 offset;
/**
* Offset value of the target buffer that the relocation entry was last
* written as.
*
* If the buffer has the same offset as last time, we can skip syncing
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
__u64 presumed_offset;
/**
* Target memory domains read by this operation.
*/
__u32 read_domains;
/**
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the whole
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
__u32 write_domain;
};
/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
/** Required alignment in graphics aperture */
__u64 alignment;
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
};
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
* performed on them.
*
* This is a pointer to an array of struct drm_i915_gem_validate_entry.
*
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
__u64 buffers_ptr;
__u32 buffer_count;
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
};
struct drm_i915_gem_exec_object2 {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
/** Required alignment in graphics aperture */
__u64 alignment;
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
};
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
*/
__u64 buffers_ptr;
__u32 buffer_count;
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
* Gen6+ only supports relative addressing to dynamic state (default) and
* absolute addressing.
*
* These flags are ignored for the BSD and BLT rings.
*/
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
__u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
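Usage sketch for the two helpers above (ctx_id is assumed to come from a prior DRM_IOCTL_I915_GEM_CONTEXT_CREATE; all other setup elided):

	struct drm_i915_gem_execbuffer2 eb2;
	memset(&eb2, 0, sizeof(eb2));
	i915_execbuffer2_set_context_id(eb2, ctx_id);
	/* fill buffers_ptr, buffer_count, batch_len and flags, then
	 * submit with DRM_IOCTL_I915_GEM_EXECBUFFER2 */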
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
__u32 pad;
/** alignment required within the aperture */
__u64 alignment;
/** Returned GTT offset of the buffer. */
__u64 offset;
};
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
__u32 handle;
__u32 pad;
};
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
*/
__u32 busy;
};
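Decoding sketch for the returned busy word, using the ring bit positions documented in the comment above (arg is a struct drm_i915_gem_busy filled in by DRM_IOCTL_I915_GEM_BUSY):

	int is_busy   = arg.busy & 1;
	int on_render = !!(arg.busy & (1 << 16));
	int on_bsd    = !!(arg.busy & (1 << 17));
	int on_blt    = !!(arg.busy & (1 << 18));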
#define I915_CACHING_NONE 0
#define I915_CACHING_CACHED 1
struct drm_i915_gem_caching {
/**
* Handle of the buffer to set/get the caching level of. */
__u32 handle;
/**
* Caching level to apply or return value
*
* bits 0-15 are for generic caching control (i.e. the above defined
* values). bits 16-31 are reserved for platform-specific variations
* (e.g. l3$ caching on gen7). */
__u32 caching;
};
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17 6
#define I915_BIT_6_SWIZZLE_9_10_17 7
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
__u32 handle;
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*
* This value is to be set on request, and will be updated by the
* kernel on successful return with the actual chosen tiling layout.
*
* The tiling mode may be demoted to I915_TILING_NONE when the system
* has bit 6 swizzling that can't be managed correctly by GEM.
*
* Buffer contents become undefined when changing tiling_mode.
*/
__u32 tiling_mode;
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
__u32 stride;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
__u32 handle;
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
__u32 tiling_mode;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
struct drm_i915_gem_get_aperture {
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
__u64 aper_size;
/**
* Available space in the aperture used by i915_gem_execbuffer, in
* bytes
*/
__u64 aper_available_size;
};
struct drm_i915_get_pipe_from_crtc_id {
/** ID of CRTC being requested **/
__u32 crtc_id;
/** pipe of requested CRTC **/
__u32 pipe;
};
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */
struct drm_i915_gem_madvise {
/** Handle of the buffer to change the backing store advice */
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
* or won't be and could be discarded under memory pressure.
*/
__u32 madv;
/** Whether the backing store still exists. */
__u32 retained;
};
/* flags */
#define I915_OVERLAY_TYPE_MASK 0xff
#define I915_OVERLAY_YUV_PLANAR 0x01
#define I915_OVERLAY_YUV_PACKED 0x02
#define I915_OVERLAY_RGB 0x03
#define I915_OVERLAY_DEPTH_MASK 0xff00
#define I915_OVERLAY_RGB24 0x1000
#define I915_OVERLAY_RGB16 0x2000
#define I915_OVERLAY_RGB15 0x3000
#define I915_OVERLAY_YUV422 0x0100
#define I915_OVERLAY_YUV411 0x0200
#define I915_OVERLAY_YUV420 0x0300
#define I915_OVERLAY_YUV410 0x0400
#define I915_OVERLAY_SWAP_MASK 0xff0000
#define I915_OVERLAY_NO_SWAP 0x000000
#define I915_OVERLAY_UV_SWAP 0x010000
#define I915_OVERLAY_Y_SWAP 0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
#define I915_OVERLAY_FLAGS_MASK 0xff000000
#define I915_OVERLAY_ENABLE 0x01000000
struct drm_intel_overlay_put_image {
/* various flags and src format description */
__u32 flags;
/* source picture description */
__u32 bo_handle;
/* stride values and offsets are in bytes, buffer relative */
__u16 stride_Y; /* stride for packed formats */
__u16 stride_UV;
__u32 offset_Y; /* offset for packed formats */
__u32 offset_U;
__u32 offset_V;
/* in pixels */
__u16 src_width;
__u16 src_height;
/* to compensate the scaling factors for partially covered surfaces */
__u16 src_scan_width;
__u16 src_scan_height;
/* output crtc description */
__u32 crtc_id;
__u16 dst_x;
__u16 dst_y;
__u16 dst_width;
__u16 dst_height;
};
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
__s32 brightness;
__u32 contrast;
__u32 saturation;
__u32 gamma0;
__u32 gamma1;
__u32 gamma2;
__u32 gamma3;
__u32 gamma4;
__u32 gamma5;
};
/*
* Intel sprite handling
*
* Color keying works with a min/mask/max tuple. Both source and destination
* color keying is allowed.
*
* Source keying:
* Sprite pixels within the min & max values, masked against the color channels
* specified in the mask field, will be transparent. All other pixels will
* be displayed on top of the primary plane. For RGB surfaces, only the min
* and mask fields will be used; ranged compares are not allowed.
*
* Destination keying:
* Primary plane pixels that match the min value, masked against the color
* channels specified in the mask field, will be replaced by corresponding
* pixels from the sprite plane.
*
* Note that source & destination keying are exclusive; only one can be
* active on a given plane.
*/
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
__u32 plane_id;
__u32 min_value;
__u32 channel_mask;
__u32 max_value;
__u32 flags;
};
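A hypothetical sketch making pure black transparent on a sprite plane via source keying (fd and plane_id are assumed; max_value is left zero since, per the comment above, RGB surfaces use only min and mask):

	struct drm_intel_sprite_colorkey ck = {
		.plane_id     = plane_id,
		.min_value    = 0x000000,	/* key on pure black */
		.channel_mask = 0xffffff,	/* compare all RGB channels */
		.max_value    = 0,		/* unused for RGB surfaces */
		.flags        = I915_SET_COLORKEY_SOURCE,
	};
	ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ck);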
struct drm_i915_gem_wait {
/** Handle of BO we shall wait on */
__u32 bo_handle;
__u32 flags;
/** Number of nanoseconds to wait. Returns time remaining. */
__s64 timeout_ns;
};
struct drm_i915_gem_context_create {
/* output: id of new context*/
__u32 ctx_id;
__u32 pad;
};
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
struct drm_i915_reg_read {
__u64 offset;
__u64 val; /* Return value */
};
#endif /* _I915_DRM_H_ */
......@@ -20,6 +20,7 @@ struct i2c_board_info;
* @hdmiphy_info: template for HDMIPHY I2C device
* @mhl_bus: controller id for MHL control bus
* @mhl_info: template for MHL I2C device
* @hpd_gpio: GPIO for Hot-Plug-Detect pin
*
* NULL pointer for *_info fields indicates that
* the corresponding chip is not present
......@@ -29,6 +30,7 @@ struct s5p_hdmi_platform_data {
struct i2c_board_info *hdmiphy_info;
int mhl_bus;
struct i2c_board_info *mhl_info;
int hpd_gpio;
};
#endif /* S5P_HDMI_H */
......
# UAPI Header export list
header-y += drm.h
header-y += drm_fourcc.h
header-y += drm_mode.h
header-y += drm_sarea.h
header-y += exynos_drm.h
header-y += i810_drm.h
header-y += i915_drm.h
header-y += mga_drm.h
header-y += nouveau_drm.h
header-y += r128_drm.h
header-y += radeon_drm.h
header-y += savage_drm.h
header-y += sis_drm.h
header-y += via_drm.h
header-y += vmwgfx_drm.h
/* exynos_drm.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authors:
* Inki Dae <inki.dae@samsung.com>
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _UAPI_EXYNOS_DRM_H_
#define _UAPI_EXYNOS_DRM_H_
#include <drm/drm.h>
/**
* User-desired buffer creation information structure.
*
* @size: user-desired memory allocation size.
* - this size value would be page-aligned internally.
* @flags: user request for setting memory type or cache attributes.
* @handle: a returned handle to the created gem object.
* - this handle will be set by the gem module on the kernel side.
*/
struct drm_exynos_gem_create {
uint64_t size;
unsigned int flags;
unsigned int handle;
};
/**
* A structure for getting buffer offset.
*
* @handle: a handle to the gem object created.
* @pad: just padding to be 64-bit aligned.
* @offset: relative offset value of the allocated memory region.
* - this value should be set by user.
*/
struct drm_exynos_gem_map_off {
unsigned int handle;
unsigned int pad;
uint64_t offset;
};
/**
* A structure for mapping buffer.
*
* @handle: a handle to gem object created.
* @pad: just padding to be 64-bit aligned.
* @size: memory size to be mapped.
* @mapped: the resulting user virtual address of the mapping.
* - this variable is filled by the exynos gem module
* on the kernel side with the user virtual address allocated
* by do_mmap().
*/
struct drm_exynos_gem_mmap {
unsigned int handle;
unsigned int pad;
uint64_t size;
uint64_t mapped;
};
/**
* A structure for gem information.
*
* @handle: a handle to gem object created.
* @flags: flag value including memory type and cache attributes;
* this value is set by the driver.
* @size: size of the memory region allocated by gem; this size is
* set by the driver.
*/
struct drm_exynos_gem_info {
unsigned int handle;
unsigned int flags;
uint64_t size;
};
/**
* A structure for user connection request of virtual display.
*
* @connection: indicates whether the user requests a connection.
* @extensions: if this value is 1 then the vidi driver needs an additional
* 128 bytes of edid data.
* @edid: the edid data pointer from user side.
*/
struct drm_exynos_vidi_connection {
unsigned int connection;
unsigned int extensions;
uint64_t edid;
};
/* memory type definitions. */
enum e_drm_exynos_gem_mem_type {
/* Physically contiguous memory, used as default. */
EXYNOS_BO_CONTIG = 0 << 0,
/* Physically non-contiguous memory. */
EXYNOS_BO_NONCONTIG = 1 << 0,
/* non-cacheable mapping, used as default. */
EXYNOS_BO_NONCACHABLE = 0 << 1,
/* cacheable mapping. */
EXYNOS_BO_CACHABLE = 1 << 1,
/* write-combine mapping. */
EXYNOS_BO_WC = 1 << 2,
EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE |
EXYNOS_BO_WC
};
struct drm_exynos_g2d_get_ver {
__u32 major;
__u32 minor;
};
struct drm_exynos_g2d_cmd {
__u32 offset;
__u32 data;
};
enum drm_exynos_g2d_event_type {
G2D_EVENT_NOT,
G2D_EVENT_NONSTOP,
G2D_EVENT_STOP, /* not yet */
};
struct drm_exynos_g2d_set_cmdlist {
__u64 cmd;
__u64 cmd_gem;
__u32 cmd_nr;
__u32 cmd_gem_nr;
/* for g2d event */
__u64 event_type;
__u64 user_data;
};
struct drm_exynos_g2d_exec {
__u64 async;
};
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_GEM_GET 0x04
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
/* G2D */
#define DRM_EXYNOS_G2D_GET_VER 0x20
#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
#define DRM_EXYNOS_G2D_EXEC 0x22
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
#define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off)
#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
#define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver)
#define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist)
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
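/*
 * Illustrative userspace sketch (not part of this header): allocating a
 * buffer with DRM_IOCTL_EXYNOS_GEM_CREATE, using the memory type flags
 * defined above. Assumes a file descriptor opened on an exynos DRM node;
 * the example_* name is hypothetical and error handling is minimal.
 */
#include <string.h>
#include <sys/ioctl.h>

static int example_exynos_gem_create(int fd, unsigned int *handle)
{
	struct drm_exynos_gem_create req;

	memset(&req, 0, sizeof(req));
	req.size = 4096;	/* page-aligned internally by the kernel */
	req.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;

	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0)
		return -1;

	*handle = req.handle;	/* filled in by the gem module */
	return 0;
}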
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
struct drm_exynos_g2d_event {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 cmdlist_no;
__u32 reserved;
};
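/*
 * Illustrative sketch (not part of this header): draining events from the
 * DRM fd and picking out DRM_EXYNOS_G2D_EVENT records. struct drm_event
 * (type, length) comes from <drm/drm.h>; read() on the fd returns a
 * stream of variable-length events. The example_* name is hypothetical.
 */
#include <string.h>
#include <unistd.h>

static void example_drain_g2d_events(int fd)
{
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf));
	ssize_t i = 0;

	while (i + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event ev;

		memcpy(&ev, buf + i, sizeof(ev));
		if (ev.type == DRM_EXYNOS_G2D_EVENT) {
			struct drm_exynos_g2d_event g2d;

			memcpy(&g2d, buf + i, sizeof(g2d));
			/* g2d.cmdlist_no identifies the completed cmdlist */
		}
		i += ev.length;
	}
}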
#endif /* _UAPI_EXYNOS_DRM_H_ */
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_
#include <drm/drm.h>
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
* of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
typedef struct _drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
I915_RESUME_DMA = 0x03
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
unsigned int back_pitch;
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
} drm_i915_init_t;
typedef struct _drm_i915_sarea {
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int ctxOwner; /* last context to upload state */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
int width, height; /* screen size in pixels */
drm_handle_t front_handle;
int front_offset;
int front_size;
drm_handle_t back_handle;
int back_offset;
int back_size;
drm_handle_t depth_handle;
int depth_offset;
int depth_size;
drm_handle_t tex_handle;
int tex_offset;
int tex_size;
int log_tex_granularity;
int pitch;
int rotation; /* 0, 90, 180 or 270 */
int rotated_offset;
int rotated_size;
int rotated_pitch;
int virtualX, virtualY;
unsigned int front_tiled;
unsigned int back_tiled;
unsigned int depth_tiled;
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
/* fill out some space for old userspace triple buffer */
drm_handle_t unused_handle;
__u32 unused1, unused2, unused3;
/* buffer object handles for static buffers. May change
* over the lifetime of the client.
*/
__u32 front_bo_handle;
__u32 back_bo_handle;
__u32 unused_bo_handle;
__u32 depth_bo_handle;
} drm_i915_sarea_t;
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
#define I915_BOX_FLIP 0x2
#define I915_BOX_WAIT 0x4
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
/* I915 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
* validated by the kernel prior to sending to hardware.
*/
typedef struct _drm_i915_cmdbuffer {
char __user *buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irqs:
*/
typedef struct drm_i915_irq_emit {
int __user *irq_seq;
} drm_i915_irq_emit_t;
typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
typedef struct drm_i915_getparam {
int param;
int __user *value;
} drm_i915_getparam_t;
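/*
 * Illustrative sketch (not part of this header): querying a parameter via
 * DRM_IOCTL_I915_GETPARAM. The kernel writes the result through the value
 * pointer; here we ask whether GEM is available. example_* is hypothetical.
 */
#include <string.h>
#include <sys/ioctl.h>

static int example_i915_has_gem(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
		return 0;
	return value;
}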
/* Ioctl to set kernel params:
*/
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
typedef struct drm_i915_setparam {
int param;
int value;
} drm_i915_setparam_t;
/* A memory manager for regions of shared memory:
*/
#define I915_MEM_REGION_AGP 1
typedef struct drm_i915_mem_alloc {
int region;
int alignment;
int size;
int __user *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
typedef struct drm_i915_mem_free {
int region;
int region_offset;
} drm_i915_mem_free_t;
typedef struct drm_i915_mem_init_heap {
int region;
int size;
int start;
} drm_i915_mem_init_heap_t;
/* Allow memory manager to be torn down and re-initialized (e.g. on
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
int region;
} drm_i915_mem_destroy_heap_t;
/* Allow X server to configure which pipes to monitor for vblank signals
*/
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2
typedef struct drm_i915_vblank_pipe {
int pipe;
} drm_i915_vblank_pipe_t;
/* Schedule buffer swap at given vertical blank:
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
typedef struct drm_i915_hws_addr {
__u64 addr;
} drm_i915_hws_addr_t;
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_end;
};
struct drm_i915_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
__u64 size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
__u32 pad;
};
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
__u32 pad;
/** Offset into the object to read from */
__u64 offset;
/** Length of data to read */
__u64 size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
__u32 handle;
__u32 pad;
/** Offset into the object to write to */
__u64 offset;
/** Length of data to write */
__u64 size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
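/*
 * Illustrative sketch (not part of this header): uploading CPU data into a
 * GEM object with DRM_IOCTL_I915_GEM_PWRITE. The user pointer travels as a
 * __u64 for 32/64-bit compatibility. example_* is a hypothetical name.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_i915_upload(int fd, __u32 handle,
			       const void *data, __u64 len)
{
	struct drm_i915_gem_pwrite pw;

	memset(&pw, 0, sizeof(pw));
	pw.handle = handle;
	pw.offset = 0;			/* write from the start of the object */
	pw.size = len;
	pw.data_ptr = (__u64)(uintptr_t)data;

	return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
}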
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/** Offset in the object to map. */
__u64 offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
__u64 size;
/**
 * Returned pointer where the data was mapped.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
};
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
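/*
 * Illustrative sketch (not part of this header): creating an object and
 * mapping it through the GTT. DRM_IOCTL_I915_GEM_MMAP_GTT only returns a
 * fake offset; the actual mapping is a regular mmap() on the DRM fd at
 * that offset. Returns MAP_FAILED on error. example_* is hypothetical.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *example_i915_create_and_map_gtt(int fd, __u64 size)
{
	struct drm_i915_gem_create create;
	struct drm_i915_gem_mmap_gtt map;

	memset(&create, 0, sizeof(create));
	create.size = size;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) < 0)
		return MAP_FAILED;

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map) < 0)
		return MAP_FAILED;

	/* create.size now holds the page-aligned allocated size */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, map.offset);
}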
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
/** New read domains */
__u32 read_domains;
/** New write domain */
__u32 write_domain;
};
struct drm_i915_gem_sw_finish {
/** Handle for the object */
__u32 handle;
};
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
 * It's appealing to make this an index into the mm_validate_entry
* list to refer to the buffer, but this allows the driver to create
* a relocation list for state buffers and not re-write it per
* exec using the buffer.
*/
__u32 target_handle;
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
__u32 delta;
/** Offset in the buffer the relocation entry will be written into */
__u64 offset;
/**
* Offset value of the target buffer that the relocation entry was last
* written as.
*
* If the buffer has the same offset as last time, we can skip syncing
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
__u64 presumed_offset;
/**
* Target memory domains read by this operation.
*/
__u32 read_domains;
/**
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the whole
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
__u32 write_domain;
};
/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
/** Required alignment in graphics aperture */
__u64 alignment;
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
};
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
 * performed on them.
*
* This is a pointer to an array of struct drm_i915_gem_validate_entry.
*
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
__u64 buffers_ptr;
__u32 buffer_count;
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
};
struct drm_i915_gem_exec_object2 {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
/** Required alignment in graphics aperture */
__u64 alignment;
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
};
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
*/
__u64 buffers_ptr;
__u32 buffer_count;
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
* Gen6+ only supports relative addressing to dynamic state (default) and
* absolute addressing.
*
* These flags are ignored for the BSD and BLT rings.
*/
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
__u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
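/*
 * Illustrative sketch (not part of this header): filling in the ring
 * selection flags and context id for an execbuffer2 call. Buffer list and
 * batch setup are elided; only the flags/rsvd1 encoding defined above is
 * shown. example_* is a hypothetical name.
 */
#include <string.h>

static void example_i915_setup_execbuf2(struct drm_i915_gem_execbuffer2 *eb2,
					__u32 ctx_id)
{
	memset(eb2, 0, sizeof(*eb2));
	/* run on the blitter ring; constants addressing flags are ignored
	 * for BSD/BLT, so only the ring bits matter here */
	eb2->flags = I915_EXEC_BLT;
	/* rsvd1 carries the context id, as the helper macros encode */
	i915_execbuffer2_set_context_id(*eb2, ctx_id);
}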
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
__u32 pad;
/** alignment required within the aperture */
__u64 alignment;
/** Returned GTT offset of the buffer. */
__u64 offset;
};
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
__u32 handle;
__u32 pad;
};
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
*/
__u32 busy;
};
#define I915_CACHING_NONE 0
#define I915_CACHING_CACHED 1
struct drm_i915_gem_caching {
/** Handle of the buffer to set/get the caching level of. */
__u32 handle;
/**
 * Caching level to apply or return value.
 *
 * Bits 0-15 are for generic caching control (i.e. the values defined
 * above). Bits 16-31 are reserved for platform-specific variations
 * (e.g. l3$ caching on gen7).
 */
__u32 caching;
};
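/*
 * Illustrative sketch (not part of this header): switching an object to
 * cached CPU mappings, then reading the level back. example_* is a
 * hypothetical name.
 */
#include <string.h>
#include <sys/ioctl.h>

static int example_i915_set_cached(int fd, __u32 handle)
{
	struct drm_i915_gem_caching arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.caching = I915_CACHING_CACHED;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) < 0)
		return -1;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg) < 0)
		return -1;
	return (int)arg.caching;	/* current level, bits 0-15 */
}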
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17 6
#define I915_BIT_6_SWIZZLE_9_10_17 7
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
__u32 handle;
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*
* This value is to be set on request, and will be updated by the
* kernel on successful return with the actual chosen tiling layout.
*
* The tiling mode may be demoted to I915_TILING_NONE when the system
* has bit 6 swizzling that can't be managed correctly by GEM.
*
* Buffer contents become undefined when changing tiling_mode.
*/
__u32 tiling_mode;
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
__u32 stride;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
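/*
 * Illustrative sketch (not part of this header): requesting X tiling for
 * an object. The kernel may demote the request to I915_TILING_NONE and
 * reports the swizzle mode needed for CPU access, so the fields are read
 * back after the call. example_* is a hypothetical name.
 */
#include <string.h>
#include <sys/ioctl.h>

static int example_i915_set_x_tiling(int fd, __u32 handle, __u32 stride)
{
	struct drm_i915_gem_set_tiling arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.tiling_mode = I915_TILING_X;
	arg.stride = stride;		/* bytes per row while tiled */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) < 0)
		return -1;

	/* arg.tiling_mode and arg.swizzle_mode hold the chosen values */
	return arg.tiling_mode == I915_TILING_X ? 0 : -1;
}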
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
__u32 handle;
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
__u32 tiling_mode;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
struct drm_i915_gem_get_aperture {
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
__u64 aper_size;
/**
* Available space in the aperture used by i915_gem_execbuffer, in
* bytes
*/
__u64 aper_available_size;
};
struct drm_i915_get_pipe_from_crtc_id {
/** ID of CRTC being requested */
__u32 crtc_id;
/** pipe of requested CRTC */
__u32 pipe;
};
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */
struct drm_i915_gem_madvise {
/** Handle of the buffer to change the backing store advice */
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
 * or won't be and can be discarded under memory pressure.
*/
__u32 madv;
/** Whether the backing store still exists. */
__u32 retained;
};
/* flags */
#define I915_OVERLAY_TYPE_MASK 0xff
#define I915_OVERLAY_YUV_PLANAR 0x01
#define I915_OVERLAY_YUV_PACKED 0x02
#define I915_OVERLAY_RGB 0x03
#define I915_OVERLAY_DEPTH_MASK 0xff00
#define I915_OVERLAY_RGB24 0x1000
#define I915_OVERLAY_RGB16 0x2000
#define I915_OVERLAY_RGB15 0x3000
#define I915_OVERLAY_YUV422 0x0100
#define I915_OVERLAY_YUV411 0x0200
#define I915_OVERLAY_YUV420 0x0300
#define I915_OVERLAY_YUV410 0x0400
#define I915_OVERLAY_SWAP_MASK 0xff0000
#define I915_OVERLAY_NO_SWAP 0x000000
#define I915_OVERLAY_UV_SWAP 0x010000
#define I915_OVERLAY_Y_SWAP 0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
#define I915_OVERLAY_FLAGS_MASK 0xff000000
#define I915_OVERLAY_ENABLE 0x01000000
struct drm_intel_overlay_put_image {
/* various flags and src format description */
__u32 flags;
/* source picture description */
__u32 bo_handle;
/* stride values and offsets are in bytes, buffer relative */
__u16 stride_Y; /* stride for packed formats */
__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
__u32 offset_U;
__u32 offset_V;
/* in pixels */
__u16 src_width;
__u16 src_height;
	/* to compensate for the scaling factors on partially covered surfaces */
__u16 src_scan_width;
__u16 src_scan_height;
/* output crtc description */
__u32 crtc_id;
__u16 dst_x;
__u16 dst_y;
__u16 dst_width;
__u16 dst_height;
};
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
__s32 brightness;
__u32 contrast;
__u32 saturation;
__u32 gamma0;
__u32 gamma1;
__u32 gamma2;
__u32 gamma3;
__u32 gamma4;
__u32 gamma5;
};
/*
* Intel sprite handling
*
* Color keying works with a min/mask/max tuple. Both source and destination
* color keying is allowed.
*
* Source keying:
* Sprite pixels within the min & max values, masked against the color channels
* specified in the mask field, will be transparent. All other pixels will
* be displayed on top of the primary plane. For RGB surfaces, only the min
* and mask fields will be used; ranged compares are not allowed.
*
* Destination keying:
* Primary plane pixels that match the min value, masked against the color
* channels specified in the mask field, will be replaced by corresponding
* pixels from the sprite plane.
*
* Note that source & destination keying are exclusive; only one can be
* active on a given plane.
*/
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
__u32 plane_id;
__u32 min_value;
__u32 channel_mask;
__u32 max_value;
__u32 flags;
};
struct drm_i915_gem_wait {
/** Handle of BO we shall wait on */
__u32 bo_handle;
__u32 flags;
	/** Number of nanoseconds to wait. Returns the time remaining. */
__s64 timeout_ns;
};
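/*
 * Illustrative sketch (not part of this header): waiting up to one second
 * for a buffer to go idle. timeout_ns is decremented in place with the
 * time remaining when the wait returns early. example_* is hypothetical.
 */
#include <string.h>
#include <sys/ioctl.h>

static int example_i915_wait_bo(int fd, __u32 handle)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = 1000000000ll;	/* 1s budget */

	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}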
struct drm_i915_gem_context_create {
	/* output: id of the new context */
__u32 ctx_id;
__u32 pad;
};
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
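/*
 * Illustrative sketch (not part of this header): creating a hardware
 * context and destroying it again. The kernel fills in ctx_id on create.
 * example_* is a hypothetical name.
 */
#include <string.h>
#include <sys/ioctl.h>

static int example_i915_context_cycle(int fd)
{
	struct drm_i915_gem_context_create create;
	struct drm_i915_gem_context_destroy destroy;

	memset(&create, 0, sizeof(create));
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) < 0)
		return -1;

	memset(&destroy, 0, sizeof(destroy));
	destroy.ctx_id = create.ctx_id;
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}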
struct drm_i915_reg_read {
__u64 offset;
__u64 val; /* Return value */
};
#endif /* _UAPI_I915_DRM_H_ */