Commit 8fd524b3 authored by FUJITA Tomonori, committed by Ingo Molnar

x86: Kill bad_dma_address variable

This kills the bad_dma_address variable, the old mechanism that let
IOMMU drivers make dma_mapping_error() work in an IOMMU-specific
way.

That mechanism can't handle systems that use both swiotlb and a
hardware IOMMU, so dma_map_ops->mapping_error was introduced to
cover that case.

Intel VT-d, GART, and swiotlb already use
dma_map_ops->mapping_error. Calgary, AMD IOMMU, and nommu use
zero as the error DMA address. This adds DMA_ERROR_CODE and
converts them to use it (as SPARC and POWER do).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: muli@il.ibm.com
Cc: joerg.roedel@amd.com
LKML-Reference: <1258287594-8777-3-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 42109197
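For context, a minimal sketch of the consumer side after this change; the helper name and error handling below are illustrative, not part of the patch:

```c
#include <linux/dma-mapping.h>

/* Illustrative only: map a buffer and bail out cleanly on failure.
 * dma_mapping_error() first consults dma_map_ops->mapping_error when
 * the backend provides one (VT-d, GART, swiotlb); otherwise it falls
 * back to comparing against DMA_ERROR_CODE (Calgary, AMD IOMMU, nommu),
 * so one check covers every x86 backend. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* never hand *handle to the hardware */
	return 0;
}
```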
@@ -20,7 +20,8 @@
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
 #endif
 
-extern dma_addr_t bad_dma_address;
+#define DMA_ERROR_CODE 0
+
 extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
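A backend whose error value cannot be 0 keeps working by supplying the ->mapping_error hook, which makes the fallback comparison in the hunk above unreachable for it. A hypothetical sketch (MY_IOMMU_BAD_ADDR and this ops table are invented for illustration):

```c
/* Hypothetical backend with a nonzero error sentinel. */
#define MY_IOMMU_BAD_ADDR	(~(dma_addr_t)0)

static int my_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == MY_IOMMU_BAD_ADDR;
}

static struct dma_map_ops my_iommu_dma_ops = {
	.mapping_error	= my_iommu_mapping_error,
	/* .map_page, .unmap_page, .map_sg, ... */
};
```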
...
@@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 	}
 
 	if (unlikely(address == -1))
-		address = bad_dma_address;
+		address = DMA_ERROR_CODE;
 
 	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
@@ -1544,7 +1544,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 	pte = dma_ops_get_pte(dom, address);
 	if (!pte)
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
@@ -1625,7 +1625,7 @@ static dma_addr_t __map_single(struct device *dev,
 retry:
 	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
 					  dma_mask);
-	if (unlikely(address == bad_dma_address)) {
+	if (unlikely(address == DMA_ERROR_CODE)) {
 		/*
 		 * setting next_address here will let the address
 		 * allocator only scan the new allocated range in the
@@ -1646,7 +1646,7 @@ static dma_addr_t __map_single(struct device *dev,
 	start = address;
 	for (i = 0; i < pages; ++i) {
 		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
-		if (ret == bad_dma_address)
+		if (ret == DMA_ERROR_CODE)
 			goto out_unmap;
 
 		paddr += PAGE_SIZE;
@@ -1674,7 +1674,7 @@ static dma_addr_t __map_single(struct device *dev,
 	dma_ops_free_addresses(dma_dom, address, pages);
 
-	return bad_dma_address;
+	return DMA_ERROR_CODE;
 }
 
 /*
@@ -1690,7 +1690,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == bad_dma_address) ||
+	if ((dma_addr == DMA_ERROR_CODE) ||
 	    (dma_addr + size > dma_dom->aperture_size))
 		return;
@@ -1732,7 +1732,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	INC_STATS_COUNTER(cnt_map_single);
 
 	if (!check_device(dev))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	dma_mask = *dev->dma_mask;
@@ -1743,12 +1743,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 		return (dma_addr_t)paddr;
 
 	if (!dma_ops_domain(domain))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	spin_lock_irqsave(&domain->lock, flags);
 	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
 			    dma_mask);
-	if (addr == bad_dma_address)
+	if (addr == DMA_ERROR_CODE)
 		goto out;
 
 	iommu_completion_wait(iommu);
@@ -1957,7 +1957,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
-	if (*dma_addr == bad_dma_address) {
+	if (*dma_addr == DMA_ERROR_CODE) {
 		spin_unlock_irqrestore(&domain->lock, flags);
 		goto out_free;
 	}
@@ -2110,7 +2110,6 @@ int __init amd_iommu_init_dma_ops(void)
 	prealloc_protection_domains();
 
 	iommu_detected = 1;
-	bad_dma_address = 0;
 	swiotlb = 0;
 #ifdef CONFIG_GART_IOMMU
 	gart_iommu_aperture_disabled = 1;
...
@@ -245,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 			if (panic_on_overflow)
 				panic("Calgary: fix the allocator.\n");
 			else
-				return bad_dma_address;
+				return DMA_ERROR_CODE;
 		}
 	}
@@ -261,11 +261,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      void *vaddr, unsigned int npages, int direction)
 {
 	unsigned long entry;
-	dma_addr_t ret = bad_dma_address;
+	dma_addr_t ret = DMA_ERROR_CODE;
 
 	entry = iommu_range_alloc(dev, tbl, npages);
 
-	if (unlikely(entry == bad_dma_address))
+	if (unlikely(entry == DMA_ERROR_CODE))
 		goto error;
 
 	/* set the return dma address */
@@ -280,7 +280,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 error:
 	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
 	       "iommu %p\n", npages, tbl);
-	return bad_dma_address;
+	return DMA_ERROR_CODE;
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -291,8 +291,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 
 	/* were we called with bad_dma_address? */
-	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
-	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
+	badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+	if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
 		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
 		     "address 0x%Lx\n", dma_addr);
 		return;
@@ -374,7 +374,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
 		entry = iommu_range_alloc(dev, tbl, npages);
-		if (entry == bad_dma_address) {
+		if (entry == DMA_ERROR_CODE) {
 			/* makes sure unmap knows to stop */
 			s->dma_length = 0;
 			goto error;
@@ -392,7 +392,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 error:
 	calgary_unmap_sg(dev, sg, nelems, dir, NULL);
 	for_each_sg(sg, s, nelems, i) {
-		sg->dma_address = bad_dma_address;
+		sg->dma_address = DMA_ERROR_CODE;
 		sg->dma_length = 0;
 	}
 	return 0;
@@ -447,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 
 	/* set up tces to cover the allocated range */
 	mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-	if (mapping == bad_dma_address)
+	if (mapping == DMA_ERROR_CODE)
 		goto free;
 	*dma_handle = mapping;
 	return ret;
@@ -728,7 +728,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
 	struct iommu_table *tbl = pci_iommu(dev->bus);
 
 	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
-	iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
+	iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
 
 	/* avoid the BIOS/VGA first 640KB-1MB region */
 	/* for CalIOC2 - avoid the entire first MB */
@@ -1359,8 +1359,6 @@ static int __init calgary_iommu_init(void)
 		return ret;
 	}
 
-	bad_dma_address = 0x0;
-
 	return 0;
 }
...
@@ -43,9 +43,6 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;
 
-dma_addr_t bad_dma_address __read_mostly = 0;
-EXPORT_SYMBOL(bad_dma_address);
-
 /* Dummy device used for NULL arguments (normally ISA). */
 struct device x86_dma_fallback_dev = {
 	.init_name = "fallback device",
...
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t bus = page_to_phys(page) + offset;
 	WARN_ON(size == 0);
 	if (!check_addr("map_single", dev, bus, size))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 	flush_write_buffers();
 	return bus;
 }