Commit 1e95ab58 authored by Dave Airlie

Merge branch 'vmwgfx-next-3.13' of git://people.freedesktop.org/~thomash/linux into drm-next

Pull request for vmwgfx. Currently just the DMA address stuff.

* 'vmwgfx-next-3.13' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Use the linux DMA api to get valid device addresses of pages
  drm/ttm: Enable the dma page pool also for intel IOMMUs
parents 4695b039 d92d9851
@@ -5,10 +5,6 @@ ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
        ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
        ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
-       ttm_bo_manager.o
-
-ifeq ($(CONFIG_SWIOTLB),y)
-ttm-y += ttm_page_alloc_dma.o
-endif
+       ttm_bo_manager.o ttm_page_alloc_dma.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -33,6 +33,7 @@
  * when freed).
  */
 
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 #define pr_fmt(fmt) "[TTM] " fmt
 
 #include <linux/dma-mapping.h>
@@ -1142,3 +1143,5 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
        return 0;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
+
+#endif
[The diff for one file is collapsed and not shown here.]
@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
+#include <linux/dma_remapping.h>
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -185,6 +186,9 @@ static struct pci_device_id vmw_pci_id_list[] = {
 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 
 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+static int vmw_force_iommu;
+static int vmw_restrict_iommu;
+static int vmw_force_coherent;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -193,6 +197,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
 module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
+module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
+module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+
 
 static void vmw_print_capabilities(uint32_t capabilities)
 {
@@ -427,12 +438,78 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
        dev_priv->initial_height = height;
 }
 
+/**
+ * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
+ * system.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * This function tries to determine the IOMMU setup and what actions
+ * need to be taken by the driver to make system pages visible to the
+ * device.
+ * If this function decides that DMA is not possible, it returns -EINVAL.
+ * The driver may then try to disable features of the device that require
+ * DMA.
+ */
+static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+{
+       const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
+       static const char *names[vmw_dma_map_max] = {
+               [vmw_dma_phys] = "Using physical TTM page addresses.",
+               [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
+               [vmw_dma_map_populate] = "Keeping DMA mappings.",
+               [vmw_dma_map_bind] = "Giving up DMA mappings early."};
+
+#ifdef CONFIG_INTEL_IOMMU
+       if (intel_iommu_enabled) {
+               dev_priv->map_mode = vmw_dma_map_populate;
+               goto out_fixup;
+       }
+#endif
+
+       if (!(vmw_force_iommu || vmw_force_coherent)) {
+               dev_priv->map_mode = vmw_dma_phys;
+               DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+               return 0;
+       }
+
+       dev_priv->map_mode = vmw_dma_map_populate;
+
+       if (dma_ops->sync_single_for_cpu)
+               dev_priv->map_mode = vmw_dma_alloc_coherent;
+#ifdef CONFIG_SWIOTLB
+       if (swiotlb_nr_tbl() == 0)
+               dev_priv->map_mode = vmw_dma_map_populate;
+#endif
+
+out_fixup:
+       if (dev_priv->map_mode == vmw_dma_map_populate &&
+           vmw_restrict_iommu)
+               dev_priv->map_mode = vmw_dma_map_bind;
+
+       if (vmw_force_coherent)
+               dev_priv->map_mode = vmw_dma_alloc_coherent;
+
+#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
+       /*
+        * No coherent page pool
+        */
+       if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+               return -EINVAL;
+#endif
+
+       DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+
+       return 0;
+}
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;
+       bool refuse_dma = false;
 
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
@@ -481,6 +558,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        }
 
        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+       ret = vmw_dma_select_mode(dev_priv);
+       if (unlikely(ret != 0)) {
+               DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+               refuse_dma = true;
+       }
 
        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -558,7 +640,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        }
 
        dev_priv->has_gmr = true;
-       if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+       if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+           refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
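The mode chosen by vmw_dma_select_mode() is consumed when TTM populates a buffer object's backing pages; those backend changes are not visible in this view. Below is a minimal sketch, not part of the commit, of how the coherent mode might route page allocation through the DMA page pool. The struct vmw_ttm_tt wrapper, its members, and vmw_ttm_populate() are assumptions made for illustration; ttm_pool_populate() and ttm_dma_populate() are the helpers declared in the ttm_page_alloc.h hunk at the end of this diff.

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include "vmwgfx_drv.h"

/*
 * Sketch only -- not part of this commit. struct vmw_ttm_tt and its
 * members are assumed here for illustration.
 */
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;      /* dma_ttm.ttm is the embedded ttm_tt */
        struct vmw_private *dev_priv;
};

static int vmw_ttm_populate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                /* Coherent pages come from the TTM DMA page pool. */
                return ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);

        /* phys / map_populate / map_bind modes use the regular page pool. */
        return ttm_pool_populate(ttm);
}

In the other three modes the regular pool is sufficient, since the pages are either addressed physically or DMA-mapped separately at populate or bind time, as the enum comments in vmwgfx_drv.h below describe.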
@@ -177,6 +177,58 @@ struct vmw_res_cache_entry {
        struct vmw_resource_val_node *node;
 };
 
+/**
+ * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
+ */
+enum vmw_dma_map_mode {
+       vmw_dma_phys,           /* Use physical page addresses */
+       vmw_dma_alloc_coherent, /* Use TTM coherent pages */
+       vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
+       vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
+       vmw_dma_map_max
+};
+
+/**
+ * struct vmw_sg_table - Scatter/gather table for binding, with additional
+ * device-specific information.
+ *
+ * @sgt: Pointer to a struct sg_table with binding information
+ * @num_regions: Number of regions with device-address contiguous pages
+ */
+struct vmw_sg_table {
+       enum vmw_dma_map_mode mode;
+       struct page **pages;
+       const dma_addr_t *addrs;
+       struct sg_table *sgt;
+       unsigned long num_regions;
+       unsigned long num_pages;
+};
+
+/**
+ * struct vmw_piter - Page iterator that iterates over a list of pages
+ * and DMA addresses that could be either a scatter-gather list or
+ * arrays
+ *
+ * @pages: Array of page pointers to the pages.
+ * @addrs: DMA addresses to the pages if coherent pages are used.
+ * @iter: Scatter-gather page iterator. Current position in SG list.
+ * @i: Current position in arrays.
+ * @num_pages: Number of pages total.
+ * @next: Function to advance the iterator. Returns false if past the list
+ * of pages, true otherwise.
+ * @dma_address: Function to return the DMA address of the current page.
+ */
+struct vmw_piter {
+       struct page **pages;
+       const dma_addr_t *addrs;
+       struct sg_page_iter iter;
+       unsigned long i;
+       unsigned long num_pages;
+       bool (*next)(struct vmw_piter *);
+       dma_addr_t (*dma_address)(struct vmw_piter *);
+       struct page *(*page)(struct vmw_piter *);
+};
+
 struct vmw_sw_context{
        struct drm_open_hash res_ht;
        bool res_ht_initialized;
@@ -358,6 +410,11 @@ struct vmw_private {
        struct list_head res_lru[vmw_res_max];
        uint32_t used_memory_size;
 
+       /*
+        * DMA mapping stuff.
+        */
+       enum vmw_dma_map_mode map_mode;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -405,7 +462,7 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
  */
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-                       struct page *pages[],
+                       const struct vmw_sg_table *vsgt,
                        unsigned long num_pages,
                        int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
@@ -568,6 +625,45 @@ extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern void vmw_piter_start(struct vmw_piter *viter,
+                           const struct vmw_sg_table *vsgt,
+                           unsigned long p_offs);
+
+/**
+ * vmw_piter_next - Advance the iterator one page.
+ *
+ * @viter: Pointer to the iterator to advance.
+ *
+ * Returns false if past the list of pages, true otherwise.
+ */
+static inline bool vmw_piter_next(struct vmw_piter *viter)
+{
+       return viter->next(viter);
+}
+
+/**
+ * vmw_piter_dma_addr - Return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns the DMA address of the page pointed to by @viter.
+ */
+static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+       return viter->dma_address(viter);
+}
+
+/**
+ * vmw_piter_page - Return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns a pointer to the page pointed to by @viter.
+ */
+static inline struct page *vmw_piter_page(struct vmw_piter *viter)
+{
+       return viter->page(viter);
+}
 
 /**
  * Command submission - vmwgfx_execbuf.c
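The iterator declared in the header above hides whether the backing store is a plain page array, a coherent DMA-address array, or a scatter-gather list. A short usage sketch follows, assuming vmwgfx_drv.h is included; note that, exactly as vmw_gmr_bind() does further down, the iterator must be advanced once with vmw_piter_next() before the first entry is valid.

#include "vmwgfx_drv.h"

/* Sketch only: walk a vmw_sg_table and log each page's bus address. */
static void vmw_dump_dma_addrs(const struct vmw_sg_table *vsgt)
{
        struct vmw_piter iter;

        vmw_piter_start(&iter, vsgt, 0);
        while (vmw_piter_next(&iter)) {
                dma_addr_t addr = vmw_piter_dma_addr(&iter);
                struct page *page = vmw_piter_page(&iter);

                DRM_INFO("page %p -> dma 0x%llx\n",
                         page, (unsigned long long)addr);
        }
}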
@@ -32,9 +32,11 @@
 #define VMW_PPN_SIZE (sizeof(unsigned long))
 /* A future safe maximum remap size. */
 #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+#define DMA_ADDR_INVALID ((dma_addr_t) 0)
+#define DMA_PAGE_INVALID 0UL
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
-                        struct page *pages[],
+                        struct vmw_piter *iter,
                         unsigned long num_pages,
                         int gmr_id)
 {
@@ -81,11 +83,13 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 
        for (i = 0; i < nr; ++i) {
                if (VMW_PPN_SIZE <= 4)
-                       *cmd = page_to_pfn(*pages++);
+                       *cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
                else
-                       *((uint64_t *)cmd) = page_to_pfn(*pages++);
+                       *((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
+                               PAGE_SHIFT;
 
                cmd += VMW_PPN_SIZE / sizeof(*cmd);
+               vmw_piter_next(iter);
        }
 
        num_pages -= nr;
@@ -120,22 +124,54 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
        vmw_fifo_commit(dev_priv, define_size);
 }
 
+static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
+                                    struct list_head *desc_pages)
+{
+       struct page *page, *next;
+       struct svga_guest_mem_descriptor *page_virtual;
+       unsigned int desc_per_page = PAGE_SIZE /
+               sizeof(struct svga_guest_mem_descriptor) - 1;
+
+       if (list_empty(desc_pages))
+               return;
+
+       list_for_each_entry_safe(page, next, desc_pages, lru) {
+               list_del_init(&page->lru);
+
+               if (likely(desc_dma != DMA_ADDR_INVALID)) {
+                       dma_unmap_page(dev, desc_dma, PAGE_SIZE,
+                                      DMA_TO_DEVICE);
+               }
+
+               page_virtual = kmap_atomic(page);
+               desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+               kunmap_atomic(page_virtual);
+
+               __free_page(page);
+       }
+}
+
 /**
  * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
  * the number of used descriptors.
+ *
  */
-static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
-                                    struct page *pages[],
-                                    unsigned long num_pages)
+static int vmw_gmr_build_descriptors(struct device *dev,
+                                    struct list_head *desc_pages,
+                                    struct vmw_piter *iter,
+                                    unsigned long num_pages,
+                                    dma_addr_t *first_dma)
 {
-       struct page *page, *next;
+       struct page *page;
        struct svga_guest_mem_descriptor *page_virtual = NULL;
        struct svga_guest_mem_descriptor *desc_virtual = NULL;
        unsigned int desc_per_page;
        unsigned long prev_pfn;
        unsigned long pfn;
        int ret;
+       dma_addr_t desc_dma;
 
        desc_per_page = PAGE_SIZE /
                sizeof(struct svga_guest_mem_descriptor) - 1;
@@ -148,23 +184,12 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
                }
 
                list_add_tail(&page->lru, desc_pages);
-
-               /*
-                * Point previous page terminating descriptor to this
-                * page before unmapping it.
-                */
-
-               if (likely(page_virtual != NULL)) {
-                       desc_virtual->ppn = page_to_pfn(page);
-                       kunmap_atomic(page_virtual);
-               }
-
                page_virtual = kmap_atomic(page);
                desc_virtual = page_virtual - 1;
                prev_pfn = ~(0UL);
 
                while (likely(num_pages != 0)) {
-                       pfn = page_to_pfn(*pages);
+                       pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 
                        if (pfn != prev_pfn + 1) {
@@ -181,104 +206,81 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
                        }
                        prev_pfn = pfn;
                        --num_pages;
-                       ++pages;
+                       vmw_piter_next(iter);
                }
 
-               (++desc_virtual)->ppn = cpu_to_le32(0);
+               (++desc_virtual)->ppn = DMA_PAGE_INVALID;
                desc_virtual->num_pages = cpu_to_le32(0);
+               kunmap_atomic(page_virtual);
        }
 
-       if (likely(page_virtual != NULL))
+       desc_dma = 0;
+       list_for_each_entry_reverse(page, desc_pages, lru) {
+               page_virtual = kmap_atomic(page);
+               page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
                kunmap_atomic(page_virtual);
+               desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
+                                       DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(dev, desc_dma)))
+                       goto out_err;
+       }
+       *first_dma = desc_dma;
 
        return 0;
 out_err:
-       list_for_each_entry_safe(page, next, desc_pages, lru) {
-               list_del_init(&page->lru);
-               __free_page(page);
-       }
+       vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
        return ret;
 }
 
-static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
-{
-       struct page *page, *next;
-
-       list_for_each_entry_safe(page, next, desc_pages, lru) {
-               list_del_init(&page->lru);
-               __free_page(page);
-       }
-}
-
 static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-                                    int gmr_id, struct list_head *desc_pages)
+                                    int gmr_id, dma_addr_t desc_dma)
 {
-       struct page *page;
-
-       if (unlikely(list_empty(desc_pages)))
-               return;
-
-       page = list_entry(desc_pages->next, struct page, lru);
-
        mutex_lock(&dev_priv->hw_mutex);
 
        vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
        wmb();
-       vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+       vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
        mb();
 
        mutex_unlock(&dev_priv->hw_mutex);
 }
 
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- */
-static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-                                              unsigned long num_pages)
-{
-       unsigned long prev_pfn = ~(0UL);
-       unsigned long pfn;
-       unsigned long descriptors = 0;
-
-       while (num_pages--) {
-               pfn = page_to_pfn(*pages++);
-               if (prev_pfn + 1 != pfn)
-                       ++descriptors;
-               prev_pfn = pfn;
-       }
-
-       return descriptors;
-}
-
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-                struct page *pages[],
+                const struct vmw_sg_table *vsgt,
                 unsigned long num_pages,
                 int gmr_id)
 {
        struct list_head desc_pages;
+       dma_addr_t desc_dma = 0;
+       struct device *dev = dev_priv->dev->dev;
+       struct vmw_piter data_iter;
        int ret;
 
+       vmw_piter_start(&data_iter, vsgt, 0);
+
+       if (unlikely(!vmw_piter_next(&data_iter)))
+               return 0;
+
        if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-               return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+               return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 
        if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
                return -EINVAL;
 
-       if (vmw_gmr_count_descriptors(pages, num_pages) >
-           dev_priv->max_gmr_descriptors)
+       if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
                return -EINVAL;
 
        INIT_LIST_HEAD(&desc_pages);
 
-       ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
+       ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
+                                       num_pages, &desc_dma);
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
-       vmw_gmr_free_descriptors(&desc_pages);
+       vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
+       vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
 
        return 0;
 }
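One subtlety in the legacy (non-GMR2) path above: the last svga_guest_mem_descriptor slot of every descriptor page is reserved as a link that holds, as a pfn, the DMA address of the next descriptor page. That is why vmw_gmr_build_descriptors() now maps the pages in reverse list order (each page must record its successor's bus address before it is itself mapped) and why vmw_gmr_free_descriptors() can walk the chain forward from the head. The sketch below just spells out the per-page budget; the 4 KiB page size and the 8-byte, two-field descriptor layout are assumptions about the typical configuration, not taken from this diff.

#include <linux/mm.h>
#include "vmwgfx_drv.h"        /* pulls in svga_guest_mem_descriptor */

/*
 * Sketch only: payload descriptors available per descriptor page.
 * Assuming PAGE_SIZE == 4096 and an 8-byte svga_guest_mem_descriptor,
 * this is 4096 / 8 - 1 = 511 payload entries; the final slot is the
 * link to the next descriptor page in the chain.
 */
static inline unsigned int vmw_gmr_desc_per_page(void)
{
        return PAGE_SIZE / sizeof(struct svga_guest_mem_descriptor) - 1;
}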
@@ -62,7 +62,7 @@ extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
 extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
  */
@@ -94,6 +94,15 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 {
        return 0;
 }
+
+static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+                                  struct device *dev)
+{
+       return -ENOMEM;
+}
+
+static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+                                     struct device *dev)
+{
+}
+
 #endif
 
 #endif
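For symmetry with the populate sketch earlier, an unpopulate path might dispatch the same way; ttm_pool_unpopulate() and ttm_dma_unpopulate() are the counterparts declared in this header. When neither CONFIG_SWIOTLB nor CONFIG_INTEL_IOMMU is set, the inline stubs above make ttm_dma_populate() fail with -ENOMEM, which matches vmw_dma_select_mode() refusing the coherent mode in that configuration. Again a sketch only, reusing the hypothetical vmw_ttm_tt wrapper from the earlier example.

#include <drm/ttm/ttm_page_alloc.h>
#include "vmwgfx_drv.h"

/* Sketch only -- mirrors the populate sketch; vmw_ttm_tt is assumed. */
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
        else
                ttm_pool_unpopulate(ttm);
}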