Commit 8ee15f32 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dma-mapping-5.1-1' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Fix a sparc64 sun4v_pci regression introduced in this merge window,
  and a dma-debug stacktrace regression from the big refactor last
  merge window"

* tag 'dma-mapping-5.1-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-debug: only skip one stackframe entry
  sparc64/pci_sun4v: fix ATU checks for large DMA masks
parents 4876191c 8c516543
...@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns ...@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
p->npages = 0; p->npages = 0;
} }
/*
 * Decide whether the ATU should be used for a given DMA mask:
 * the IOMMU must actually have an ATU, and the device's mask must
 * exceed 32 bits (legacy IOMMU handles the <= 32-bit case).
 */
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	if (!iommu->atu)
		return false;

	return mask > DMA_BIT_MASK(32);
}
/* Interrupts must be disabled. */ /* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask) static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{ {
...@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask) ...@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) { while (npages != 0) {
if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) { if (!iommu_use_atu(pbm->iommu, mask)) {
num = pci_sun4v_iommu_map(devhandle, num = pci_sun4v_iommu_map(devhandle,
HV_PCI_TSBID(0, entry), HV_PCI_TSBID(0, entry),
npages, npages,
...@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, ...@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
unsigned long flags, order, first_page, npages, n; unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0; unsigned long prot = 0;
struct iommu *iommu; struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl; struct iommu_map_table *tbl;
struct page *page; struct page *page;
void *ret; void *ret;
...@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, ...@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order); memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu; iommu = dev->archdata.iommu;
atu = iommu->atu;
mask = dev->coherent_dma_mask; mask = dev->coherent_dma_mask;
if (mask <= DMA_BIT_MASK(32) || !atu) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &iommu->atu->tbl;
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0); (unsigned long)(-1), 0);
...@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, ...@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
atu = iommu->atu; atu = iommu->atu;
devhandle = pbm->devhandle; devhandle = pbm->devhandle;
if (dvma <= DMA_BIT_MASK(32)) { if (!iommu_use_atu(iommu, dvma)) {
tbl = &iommu->tbl; tbl = &iommu->tbl;
iotsb_num = 0; /* we don't care for legacy iommu */ iotsb_num = 0; /* we don't care for legacy iommu */
} else { } else {
...@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, ...@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages >>= IO_PAGE_SHIFT; npages >>= IO_PAGE_SHIFT;
mask = *dev->dma_mask; mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32)) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &atu->tbl;
...@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, ...@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT; IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
mask = *dev->dma_mask; mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32)) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &atu->tbl;
......
...@@ -706,7 +706,7 @@ static struct dma_debug_entry *dma_entry_alloc(void) ...@@ -706,7 +706,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
#ifdef CONFIG_STACKTRACE #ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
entry->stacktrace.entries = entry->st_entries; entry->stacktrace.entries = entry->st_entries;
entry->stacktrace.skip = 2; entry->stacktrace.skip = 1;
save_stack_trace(&entry->stacktrace); save_stack_trace(&entry->stacktrace);
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment