Commit 78eb1ca4 authored by Dave Airlie

Merge branch 'vmwgfx-fixes-5.0-2' of git://people.freedesktop.org/~thomash/linux into drm-fixes

A patch set from Christoph fixing vmwgfx DMA mode detection, which was broken
by the DMA code restructuring in 5.0.

A couple of fixes that are also Cc'd to stable.

Finally, an improved IOMMU detection that automatically enables DMA mapping
with vIOMMUs other than the Intel one, if one is present and enabled.
Previously, trying to start a VM in that case would fail catastrophically.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190206194735.4663-1-thellstrom@vmware.com
parents 8628752d 9ddac734
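
For context before the diff: the improved detection no longer keys on the
Intel-specific intel_iommu_enabled flag, but infers DMA remapping generically
from the device's dma_map_ops. A minimal sketch of that idea, assuming the
5.0-era DMA API (get_dma_ops(), dma_is_direct(), dma_direct_map_page()); the
driver's actual helper is vmw_assume_iommu() in the first hunk below:

    #include <linux/dma-mapping.h>

    /*
     * Sketch: if the device's dma_map_ops are not the direct (identity)
     * implementation, some IOMMU is most likely remapping the device's
     * DMA addresses, so coherent dma-remapping may be taking place.
     */
    static bool device_dma_is_remapped(struct device *dev)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            return !dma_is_direct(ops) && ops &&
                   ops->map_page != dma_direct_map_page;
    }

This is what allows the driver to pick a DMA-mapping mode under any enabled
vIOMMU, not just VT-d.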
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
  **************************************************************************/
 #include <linux/module.h>
 #include <linux/console.h>
+#include <linux/dma-mapping.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_module.h>
-#include <linux/intel-iommu.h>
 
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
@@ -545,6 +545,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
 	dev_priv->initial_height = height;
 }
 
+/**
+ * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
+ * taking place.
+ * @dev: Pointer to the struct drm_device.
+ *
+ * Return: true if iommu present, false otherwise.
+ */
+static bool vmw_assume_iommu(struct drm_device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev->dev);
+
+	return !dma_is_direct(ops) && ops &&
+		ops->map_page != dma_direct_map_page;
+}
+
 /**
  * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
  * system.
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_enabled) {
+	if (vmw_force_coherent)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (vmw_assume_iommu(dev_priv->dev))
 		dev_priv->map_mode = vmw_dma_map_populate;
-		goto out_fixup;
-	}
-#endif
-
-	if (!(vmw_force_iommu || vmw_force_coherent)) {
+	else if (!vmw_force_iommu)
 		dev_priv->map_mode = vmw_dma_phys;
-		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-		return 0;
-	}
-
-	dev_priv->map_mode = vmw_dma_map_populate;
-
-	if (dma_ops && dma_ops->sync_single_for_cpu)
+	else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl() == 0)
+	else
 		dev_priv->map_mode = vmw_dma_map_populate;
-#endif
 
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
-	if (dev_priv->map_mode == vmw_dma_map_populate &&
-	    vmw_restrict_iommu)
+	if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
 		dev_priv->map_mode = vmw_dma_map_bind;
 
-	if (vmw_force_coherent)
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
-
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
-	/*
-	 * No coherent page pool
-	 */
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
+	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
 		return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
-	dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
 
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
 }
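
Read as a whole, the rewritten vmw_dma_select_mode() above is now a single
priority ladder with no #ifdef maze. A hypothetical standalone restatement of
the resulting policy, for reading convenience only (the names mirror the
driver's, but this is not driver API):

    /* Illustrative restatement of the selection ladder above. */
    enum vmw_dma_map_mode {
            vmw_dma_phys,           /* "Using physical TTM page addresses." */
            vmw_dma_alloc_coherent, /* "Using coherent TTM pages." */
            vmw_dma_map_populate,   /* "Keeping DMA mappings." */
            vmw_dma_map_bind,       /* "Giving up DMA mappings early." */
    };

    enum vmw_dma_map_mode
    select_mode(bool force_coherent, bool assume_iommu, bool force_iommu,
                bool swiotlb_active, bool restrict_iommu)
    {
            enum vmw_dma_map_mode mode;

            if (force_coherent)             /* module option wins outright */
                    mode = vmw_dma_alloc_coherent;
            else if (assume_iommu)          /* any enabled (v)IOMMU */
                    mode = vmw_dma_map_populate;
            else if (!force_iommu)          /* common case: no remapping */
                    mode = vmw_dma_phys;
            else if (swiotlb_active)        /* bounce buffering in effect */
                    mode = vmw_dma_alloc_coherent;
            else
                    mode = vmw_dma_map_populate;

            /* Optionally give up DMA mappings early. */
            if (mode == vmw_dma_map_populate && restrict_iommu)
                    mode = vmw_dma_map_bind;

            return mode;
    }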
@@ -625,24 +612,20 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
  * With 32-bit we can only handle 32 bit PFNs. Optionally set that
  * restriction also for 64-bit systems.
  */
-#ifdef CONFIG_INTEL_IOMMU
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	int ret = 0;
 
-	if (intel_iommu_enabled &&
+	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+	if (dev_priv->map_mode != vmw_dma_phys &&
 	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 	}
-	return 0;
-}
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
-	return 0;
+
+	return ret;
 }
-#endif
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
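
Two details in the vmw_dma_masks() hunk above: dma_set_mask_and_coherent()
sets both the streaming and the coherent DMA mask in one call, where the old
code set only the streaming mask via dma_set_mask(); and DMA_BIT_MASK(44)
limits DMA addresses to 2^44 - 1, i.e. a 16 TiB address space. A small
userspace sketch of the mask arithmetic, using the same expansion as the
kernel's DMA_BIT_MASK() macro:

    #include <stdio.h>

    /* Same expansion as the kernel macro for n < 64. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            /* Prints 0xfffffffffff: eleven f's, a 44-bit address limit. */
            printf("DMA_BIT_MASK(44) = 0x%llx\n", DMA_BIT_MASK(44));
            return 0;
    }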
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 		*p_fence = NULL;
 	}
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
 		struct drm_connector_state *conn_state;
 		struct vmw_connector_state *vmw_conn_state;
 
-		if (!du->pref_active) {
+		if (!du->pref_active && new_crtc_state->enable) {
 			ret = -EINVAL;
 			goto clean;
 		}
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
 					  user_fence_rep)
 {
 	struct vmw_fence_obj *fence = NULL;
-	uint32_t handle;
-	int ret;
+	uint32_t handle = 0;
+	int ret = 0;
 
 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
 	    out_fence)