Commit 1569d651 authored by Christian König, committed by Alex Deucher

drm/ttm: fix populate_and_map() functions once more

This reverts "drm/ttm: Fix configuration error around populate_and_map()
functions".

This fix went in the wrong direction. Those helpers should be
available even when neither CONFIG_INTEL_IOMMU nor CONFIG_SWIOTLB is
set.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 97011249
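For context, a minimal sketch (not part of this commit; struct driver_tt, its dev field and the function names are hypothetical) of the call pattern the revert restores: a driver can call these helpers unconditionally, so their declarations and definitions must exist even when neither CONFIG_SWIOTLB nor CONFIG_INTEL_IOMMU is enabled.

#include <linux/kernel.h>
#include <linux/device.h>
#include <drm/ttm/ttm_bo_driver.h>	/* struct ttm_tt, struct ttm_dma_tt */
#include <drm/ttm/ttm_page_alloc.h>

/* Hypothetical driver TT wrapper embedding the DMA-aware TT. */
struct driver_tt {
	struct ttm_dma_tt dma;	/* dma.ttm is the embedded struct ttm_tt */
	struct device *dev;
};

static int driver_tt_populate(struct ttm_tt *ttm)
{
	struct driver_tt *gtt = container_of(ttm, struct driver_tt, dma.ttm);

	/* Always linkable after this revert, with or without SWIOTLB/IOMMU. */
	return ttm_populate_and_map_pages(gtt->dev, &gtt->dma);
}

static void driver_tt_unpopulate(struct ttm_tt *ttm)
{
	struct driver_tt *gtt = container_of(ttm, struct driver_tt, dma.ttm);

	ttm_unmap_and_unpopulate_pages(gtt->dev, &gtt->dma);
}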
drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1058,7 +1058,6 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
 	unsigned i, j;
@@ -1129,7 +1128,6 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
 	ttm_pool_unpopulate(&tt->ttm);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-#endif
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
include/drm/ttm/ttm_page_alloc.h
@@ -58,12 +58,21 @@ int ttm_pool_populate(struct ttm_tt *ttm);
  */
 void ttm_pool_unpopulate(struct ttm_tt *ttm);
 
+/**
+ * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
+ */
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
+ * Unpopulates and DMA unmaps pages as part of a
+ * ttm_dma_unpopulate() request */
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+
 /**
  * Output the state of pools to debugfs file
  */
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
@@ -83,17 +92,6 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
 					   unsigned max_pages)
@@ -116,16 +114,6 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
 					  struct device *dev)
 {
 }
 
-static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-	return -ENOMEM;
-}
-
-static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-}
-
 #endif
 
 #endif