Commit e6b46ee7 authored by Dave Airlie

Merge branch 'drm-vmware-next' into drm-core-next

* drm-vmware-next:
  drm/vmwgfx: Bump minor and driver date
  drm/vmwgfx: Save at least one screen layout
  drm/vmwgfx: Add modinfo version
  drm/vmwgfx: Add a parameter to get the max fb size
  drm/vmwgfx: Don't flush fb if we're in the suspended state.
  drm/vmwgfx: Prune modes based on available VRAM size
  drm/vmwgfx: Take the ttm lock around the dirty ioctl
  drm: vmwgfx: Add a struct drm_file parameter to the dirty framebuffer callback
  drm/vmwgfx: Add new-style PM hooks to improve hibernation behavior
  drm/vmwgfx: Fix ACPI S3 & S4 functionality.
  drm/vmwgfx: Really support other depths than 32
parents fb7ba211 8aea5287
@@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
-ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+clips, num_clips);
} else {
ret = -ENOSYS;
goto out_err2;
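(Not part of the commit: a minimal userspace sketch of the path the hunk above services. It assumes libdrm's drmModeDirtyFB() wrapper and an fb_id previously returned by drmModeAddFB(); the request ends up in drm_mode_dirtyfb_ioctl(), which now hands the caller's struct drm_file to fb->funcs->dirty().)

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Flush one damaged rectangle of a frontbuffer to the device. */
static int flush_damage(int drm_fd, uint32_t fb_id,
                        uint16_t x1, uint16_t y1, uint16_t x2, uint16_t y2)
{
        drmModeClip clip = {
                .x1 = x1, .y1 = y1,     /* top-left, inclusive */
                .x2 = x2, .y2 = y2,     /* bottom-right, exclusive */
        };

        /* DRM_IOCTL_MODE_DIRTYFB -> drm_mode_dirtyfb_ioctl() -> fb->funcs->dirty() */
        return drmModeDirtyFB(drm_fd, fb_id, &clip, 1);
}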
@@ -597,6 +597,8 @@ static void vmw_lastclose(struct drm_device *dev)
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
INIT_LIST_HEAD(&vmaster->fb_surf);
mutex_init(&vmaster->fb_surf_mutex);
}
static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +610,7 @@ static int vmw_master_create(struct drm_device *dev,
if (unlikely(vmaster == NULL))
return -ENOMEM;
-ttm_lock_init(&vmaster->lock);
+vmw_master_init(vmaster);
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
master->driver_priv = vmaster;
@@ -699,6 +701,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
vmw_kms_idle_workqueues(vmaster);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -751,15 +754,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* Buffer contents is moved to swappable memory.
*/
ttm_bo_swapout_all(&dev_priv->bdev);
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
+case PM_POST_RESTORE:
ttm_suspend_unlock(&vmaster->lock);
break;
case PM_RESTORE_PREPARE:
break;
-case PM_POST_RESTORE:
-break;
default:
break;
}
@@ -770,21 +774,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* These might not be needed with the virtual SVGA device.
*/
-int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
if (dev_priv->num_3d_resources != 0) {
DRM_INFO("Can't suspend or hibernate "
"while 3D resources are active.\n");
return -EBUSY;
}
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-int vmw_pci_resume(struct pci_dev *pdev)
+static int vmw_pci_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return pci_enable_device(pdev);
}
static int vmw_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct pm_message dummy;
dummy.event = 0;
return vmw_pci_suspend(pdev, dummy);
}
static int vmw_pm_resume(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
return vmw_pci_resume(pdev);
}
static int vmw_pm_prepare(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
/**
* Release 3d reference held by fbdev and potentially
* stop fifo.
*/
dev_priv->suspended = true;
if (dev_priv->enable_fb)
vmw_3d_resource_dec(dev_priv);
if (dev_priv->num_3d_resources != 0) {
DRM_INFO("Can't suspend or hibernate "
"while 3D resources are active.\n");
if (dev_priv->enable_fb)
vmw_3d_resource_inc(dev_priv);
dev_priv->suspended = false;
return -EBUSY;
}
return 0;
}
static void vmw_pm_complete(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
/**
* Reclaim 3d reference held by fbdev and potentially
* start fifo.
*/
if (dev_priv->enable_fb)
vmw_3d_resource_inc(dev_priv);
dev_priv->suspended = false;
}
static const struct dev_pm_ops vmw_pm_ops = {
.prepare = vmw_pm_prepare,
.complete = vmw_pm_complete,
.suspend = vmw_pm_suspend,
.resume = vmw_pm_resume,
};
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -824,8 +905,9 @@ static struct drm_driver driver = {
.id_table = vmw_pci_id_list,
.probe = vmw_probe,
.remove = vmw_remove,
-.suspend = vmw_pci_suspend,
-.resume = vmw_pci_resume
+.driver = {
+.pm = &vmw_pm_ops
+}
},
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
@@ -860,3 +942,7 @@ module_exit(vmwgfx_exit);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
__stringify(VMWGFX_DRIVER_MINOR) "."
__stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
"0");
@@ -39,9 +39,9 @@
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
#define VMWGFX_DRIVER_DATE "20100209"
#define VMWGFX_DRIVER_DATE "20100927"
#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 2
+#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -151,6 +151,8 @@ struct vmw_overlay;
struct vmw_master {
struct ttm_lock lock;
struct mutex fb_surf_mutex;
struct list_head fb_surf;
};
struct vmw_vga_topology_state {
@@ -286,6 +288,7 @@ struct vmw_private {
struct vmw_master *active_master;
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
bool suspended;
struct mutex release_mutex;
uint32_t num_3d_resources;
@@ -518,6 +521,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned bbp, unsigned depth);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
/**
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
if (!vmw_kms_validate_mode_vram(vmw_priv,
info->fix.line_length,
var->yoffset + var->yres)) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
return 0;
}
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
SVGAFifoCmdUpdate body;
} *cmd;
if (vmw_priv->suspended)
return;
spin_lock_irqsave(&par->dirty.lock, flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, flags);
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_CAPS:
param->value = dev_priv->fifo.capabilities;
break;
case DRM_VMW_PARAM_MAX_FB_SIZE:
param->value = dev_priv->vram_size;
break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
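(Not part of the commit: a hedged userspace sketch of querying the new parameter. It assumes libdrm's drmCommandWriteRead() helper and that the DRM_VMW_GET_PARAM / struct drm_vmw_getparam_arg definitions from vmwgfx_drm.h, shown at the end of this diff, are available to the program.)

#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Ask the kernel how large a framebuffer the device can scan out. */
static int vmw_query_max_fb_size(int drm_fd, uint64_t *max_fb_bytes)
{
        struct drm_vmw_getparam_arg arg = {
                .param = DRM_VMW_PARAM_MAX_FB_SIZE,
        };
        int ret;

        ret = drmCommandWriteRead(drm_fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
        if (ret == 0)
                *max_fb_bytes = arg.value;  /* dev_priv->vram_size on the kernel side */

        return ret;
}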
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
struct delayed_work d_work;
struct mutex work_lock;
bool present_fs;
struct list_head head;
struct drm_master *master;
};
/**
* vmw_kms_idle_workqueues - Flush workqueues on this master
*
* @vmaster - Pointer identifying the master, for the surfaces of which
* we idle the dirty work queues.
*
* This function should be called with the ttm lock held in exclusive mode
* to idle all dirty work queues before the fifo is taken down.
*
* The work task may actually requeue itself, but after the flush returns we're
* sure that there's nothing to present, since the ttm lock is held in
* exclusive mode, so the fifo will never get used.
*/
void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
{
struct vmw_framebuffer_surface *entry;
mutex_lock(&vmaster->fb_surf_mutex);
list_for_each_entry(entry, &vmaster->fb_surf, head) {
if (cancel_delayed_work_sync(&entry->d_work))
(void) entry->d_work.work.func(&entry->d_work.work);
(void) cancel_delayed_work_sync(&entry->d_work);
}
mutex_unlock(&vmaster->fb_surf_mutex);
}
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
-struct vmw_framebuffer_surface *vfb =
+struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct vmw_master *vmaster = vmw_master(vfbs->master);
mutex_lock(&vmaster->fb_surf_mutex);
list_del(&vfbs->head);
mutex_unlock(&vmaster->fb_surf_mutex);
-cancel_delayed_work_sync(&vfb->d_work);
+cancel_delayed_work_sync(&vfbs->d_work);
+drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
-vmw_surface_unreference(&vfb->surface);
+vmw_surface_unreference(&vfbs->surface);
-kfree(framebuffer);
+kfree(vfbs);
}
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
SVGA3dCopyRect cr;
} *cmd;
/**
* Strictly we should take the ttm_lock in read mode before accessing
* the fifo, to make sure the fifo is present and up. However,
* instead we flush all workqueues under the ttm lock in exclusive mode
* before taking down the fifo.
*/
mutex_lock(&vfbs->work_lock);
if (!vfbs->present_fs)
goto out_unlock;
@@ -392,17 +435,20 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct vmw_surface *surf = vfbs->surface;
struct drm_clip_rect norect;
SVGA3dCopyRect *cr;
int i, inc = 1;
int ret;
struct {
SVGA3dCmdHeader header;
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
SVGA3dCopyRect cr;
} *cmd;
if (unlikely(vfbs->master != file_priv->master))
return -EINVAL;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
if (!num_clips ||
!(dev_priv->fifo.capabilities &
SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
*/
vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
}
ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
}
vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.create_handle = vmw_framebuffer_create_handle,
};
-int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
-unsigned width, unsigned height)
+const struct drm_mode_fb_cmd
+*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
/*
* Sanity checks.
*/
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
surface->sizes[0].height < mode_cmd->height ||
surface->sizes[0].depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
}
switch (mode_cmd->depth) {
case 32:
format = SVGA3D_A8R8G8B8;
break;
case 24:
format = SVGA3D_X8R8G8B8;
break;
case 16:
format = SVGA3D_R5G6B5;
break;
case 15:
format = SVGA3D_A1R5G5B5;
break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
if (unlikely(format != surface->format)) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
}
/* XXX get the first 3 from the surface info */
-vfbs->base.base.bits_per_pixel = 32;
-vfbs->base.base.pitch = width * 32 / 4;
-vfbs->base.base.depth = 24;
-vfbs->base.base.width = width;
-vfbs->base.base.height = height;
+vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
+vfbs->base.base.pitch = mode_cmd->pitch;
+vfbs->base.base.depth = mode_cmd->depth;
+vfbs->base.base.width = mode_cmd->width;
+vfbs->base.base.height = mode_cmd->height;
vfbs->base.pin = &vmw_surface_dmabuf_pin;
vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
vfbs->master = drm_master_get(file_priv->master);
mutex_init(&vfbs->work_lock);
mutex_lock(&vmaster->fb_surf_mutex);
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
list_add_tail(&vfbs->head, &vmaster->fb_surf);
mutex_unlock(&vmaster->fb_surf_mutex);
*out = &vfbs->base;
return 0;
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_clip_rect norect;
int ret;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
int i, increment = 1;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
}
vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -659,16 +770,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
-int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
-unsigned width, unsigned height)
+const struct drm_mode_fb_cmd
+*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
unsigned int requested_size;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitch;
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
}
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
@@ -685,12 +805,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err3;
}
-/* XXX get the first 3 from the surface info */
-vfbd->base.base.bits_per_pixel = 32;
-vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
-vfbd->base.base.depth = 24;
-vfbd->base.base.width = width;
-vfbd->base.base.height = height;
+vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
+vfbd->base.base.pitch = mode_cmd->pitch;
+vfbd->base.base.depth = mode_cmd->depth;
+vfbd->base.base.width = mode_cmd->width;
+vfbd->base.base.height = mode_cmd->height;
vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
vfbd->buffer = dmabuf;
@@ -719,8 +838,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
u64 required_size;
int ret;
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
* requested framebuffer.
*/
required_size = mode_cmd->pitch * mode_cmd->height;
if (unlikely(required_size > (u64) dev_priv->vram_size)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return NULL;
}
/**
* End conditioned code.
*/
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
mode_cmd->handle, &surface);
if (ret)
@@ -729,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (!surface->scanout)
goto err_not_scanout;
-ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
-mode_cmd->width, mode_cmd->height);
+ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+&vfb, mode_cmd);
/* vmw_user_surface_lookup takes one ref so does new_fb */
vmw_surface_unreference(&surface);
@@ -751,7 +887,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-mode_cmd->width, mode_cmd->height);
+mode_cmd);
/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
vmw_dmabuf_unreference(&bo);
@@ -889,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_priv->num_displays = vmw_read(vmw_priv,
SVGA_REG_NUM_GUEST_DISPLAYS);
if (vmw_priv->num_displays == 0)
vmw_priv->num_displays = 1;
for (i = 0; i < vmw_priv->num_displays; ++i) {
save = &vmw_priv->vga_save[i];
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
@@ -997,6 +1136,13 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
return ret;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
}
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
return 0;
@@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
{
struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -443,6 +445,8 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
mode->hdisplay = ldu->pref_width;
mode->vdisplay = ldu->pref_height;
mode->vrefresh = drm_mode_vrefresh(mode);
if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
mode->vdisplay)) {
drm_mode_probed_add(connector, mode);
if (ldu->pref_mode) {
@@ -452,13 +456,19 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
ldu->pref_mode = mode;
}
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
-if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
-vmw_ldu_connector_builtin[i].vdisplay > max_height)
+bmode = &vmw_ldu_connector_builtin[i];
+if (bmode->hdisplay > max_width ||
+bmode->vdisplay > max_height)
continue;
+if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+bmode->vdisplay))
+continue;
-mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+mode = drm_mode_duplicate(dev, bmode);
if (!mode)
return 0;
mode->vrefresh = drm_mode_vrefresh(mode);
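(Not part of the commit: a standalone sketch of the VRAM pruning rule used above. The legacy display unit budgets each probed mode at hdisplay * 2 bytes per scanline, and vmw_kms_validate_mode_vram() keeps a mode only while pitch * height stays below dev_priv->vram_size.)

#include <stdbool.h>
#include <stdint.h>

/* Mirror of the kernel-side check: pitch * height must fit in VRAM. */
static bool mode_fits_in_vram(uint32_t pitch, uint32_t height, uint64_t vram_size)
{
        return (uint64_t)pitch * (uint64_t)height < vram_size;
}

/*
 * Example: a 1280x1024 builtin mode is budgeted at 1280 * 2 = 2560 bytes per
 * scanline, i.e. 2560 * 1024 = 2621440 bytes, so it survives pruning only
 * when the device reports more VRAM than that.
 */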
@@ -221,7 +221,8 @@ struct drm_framebuffer_funcs {
* the semantics and arguments have a one to one mapping
* on this function.
*/
-int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+int (*dirty)(struct drm_framebuffer *framebuffer,
+struct drm_file *file_priv, unsigned flags,
unsigned color, struct drm_clip_rect *clips,
unsigned num_clips);
};
@@ -72,6 +72,7 @@
#define DRM_VMW_PARAM_FIFO_OFFSET 3
#define DRM_VMW_PARAM_HW_CAPS 4
#define DRM_VMW_PARAM_FIFO_CAPS 5
#define DRM_VMW_PARAM_MAX_FB_SIZE 6
/**
* struct drm_vmw_getparam_arg