Commit 5848dc5b authored by Linus Torvalds

dma-debug: remove debug_dma_assert_idle() function

This removes the code from the COW path that calls debug_dma_assert_idle(),
which was added many years ago.

Google shows that it hasn't caught anything in the 6+ years we've had it
apart from a false positive, and Hugh just noticed how it had a very
unfortunate spinlock serialization in the COW path.

He fixed that issue in the previous commit (a85ffd59: "dma-debug: fix
debug_dma_assert_idle(), use rcu_read_lock()"), but let's see if anybody
even notices when we remove this function entirely.

NOTE! We keep the dma tracking infrastructure that was added by the
commit that introduced it.  Partly to make it easier to resurrect this
debug code if we ever decide to, and partly because that tracking by pfn
and offset looks quite reasonable.
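As a rough sketch of what that tracking keys on, the snippet below maps a
(pfn, offset) pair to the cacheline number used to index the active-cacheline
radix tree.  The PAGE_SHIFT and L1_CACHE_SHIFT values are assumptions for
illustration (4 KiB pages, 64-byte lines), not taken from the kernel sources.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed values for illustration; the kernel derives these per arch. */
    #define PAGE_SHIFT               12    /* assume 4 KiB pages */
    #define L1_CACHE_SHIFT            6    /* assume 64-byte cachelines */
    #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)

    /*
     * Turn (pfn, byte offset of the mapping within the page) into a global
     * cacheline number, i.e. the key that the active-cacheline tracking
     * keeps in its radix tree.
     */
    static uint64_t to_cacheline_number(uint64_t pfn, uint64_t offset)
    {
            return (pfn << CACHELINE_PER_PAGE_SHIFT) + (offset >> L1_CACHE_SHIFT);
    }

    int main(void)
    {
            /* e.g. pfn 0x1234, a mapping that starts 0x80 bytes into the page */
            printf("cacheline number: %#llx\n",
                   (unsigned long long)to_cacheline_number(0x1234, 0x80));
            return 0;
    }

Only the lookup-and-warn side goes away in the diff below; the insert/remove
bookkeeping built on this key stays in place.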

The problem with this debug code was simply that it was expensive and
didn't seem worth it, not that it was wrong per se.
Acked-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a85ffd59
@@ -67,8 +67,6 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
 extern void debug_dma_dump_mappings(struct device *dev);
 
-extern void debug_dma_assert_idle(struct page *page);
-
 #else /* CONFIG_DMA_API_DEBUG */
 
 static inline void dma_debug_add_bus(struct bus_type *bus)
@@ -157,10 +155,6 @@ static inline void debug_dma_dump_mappings(struct device *dev)
 {
 }
 
-static inline void debug_dma_assert_idle(struct page *page)
-{
-}
-
 #endif /* CONFIG_DMA_API_DEBUG */
 
 #endif /* __DMA_DEBUG_H */
@@ -186,11 +186,6 @@ config DMA_API_DEBUG
 	  drivers like double-freeing of DMA mappings or freeing mappings that
 	  were never allocated.
 
-	  This also attempts to catch cases where a page owned by DMA is
-	  accessed by the cpu in a way that could cause data corruption.  For
-	  example, this enables cow_user_page() to check that the source page is
-	  not undergoing DMA.
-
 	  This option causes a performance degradation. Use only if you want to
 	  debug device drivers and dma interactions.
...
@@ -448,9 +448,6 @@ void debug_dma_dump_mappings(struct device *dev)
  * dma_active_cacheline entry to track per event. dma_map_sg(), on the
  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
  * entries into the tree.
- *
- * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if any cachelines in the given page are in the active set.
  */
 static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
 static DEFINE_SPINLOCK(radix_lock);
@@ -497,10 +494,7 @@ static void active_cacheline_inc_overlap(phys_addr_t cln)
 	overlap = active_cacheline_set_overlap(cln, ++overlap);
 
 	/* If we overflowed the overlap counter then we're potentially
-	 * leaking dma-mappings. Otherwise, if maps and unmaps are
-	 * balanced then this overflow may cause false negatives in
-	 * debug_dma_assert_idle() as the cacheline may be marked idle
-	 * prematurely.
+	 * leaking dma-mappings.
 	 */
 	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
 		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
@@ -555,44 +549,6 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
 	spin_unlock_irqrestore(&radix_lock, flags);
 }
 
-/**
- * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_cacheline tree
- *
- * Place a call to this routine in cases where the cpu touching the page
- * before the dma completes (page is dma_unmapped) will lead to data
- * corruption.
- */
-void debug_dma_assert_idle(struct page *page)
-{
-	struct dma_debug_entry *entry;
-	unsigned long pfn;
-	phys_addr_t cln;
-
-	if (dma_debug_disabled())
-		return;
-
-	if (!page)
-		return;
-
-	pfn = page_to_pfn(page);
-	cln = (phys_addr_t) pfn << CACHELINE_PER_PAGE_SHIFT;
-
-	rcu_read_lock();
-	if (!radix_tree_gang_lookup(&dma_active_cacheline, (void **) &entry,
-				    cln, 1) || entry->pfn != pfn)
-		entry = NULL;
-	rcu_read_unlock();
-
-	if (!entry)
-		return;
-
-	cln = to_cacheline_number(entry);
-	err_printk(entry->dev, entry,
-		   "cpu touching an active dma mapped cacheline [cln=%pa]\n",
-		   &cln);
-}
-
 /*
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
...
@@ -2411,8 +2411,6 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = vmf->address;
 
-	debug_dma_assert_idle(src);
-
 	if (likely(src)) {
 		copy_user_highpage(dst, src, addr, vma);
 		return true;
...