Commit 563aaf06 authored by Jan Beulich, committed by Tony Luck

[IA64] swiotlb cleanup

- add proper __init decoration to swiotlb's init code (and the code calling
  it, where not already the case)

- replace uses of 'unsigned long' with dma_addr_t where appropriate

- do miscellaneous simplification and cleanup
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 93fbff63
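
As background for the two mechanical patterns the patch applies, here is a minimal, hypothetical sketch; none of the names below (example_setup, example_report, example_pool_bytes) come from the patch. `__init` marks code that runs only during boot so its text can be discarded afterwards, and `dma_addr_t` is the type the DMA API uses for bus addresses, which need not have the same width as `unsigned long`.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only -- not part of the patch. */

/* Data used only during boot can be dropped together with __init text. */
static unsigned long example_pool_bytes __initdata = 1 << 20;

/*
 * Bus addresses are carried in dma_addr_t, which may be wider than
 * unsigned long on some configurations, so widen it explicitly when
 * printing.
 */
static void example_report(dma_addr_t dev_addr)
{
	printk(KERN_INFO "bounce pool at bus address 0x%016Lx\n",
	       (unsigned long long)dev_addr);
}

/*
 * Boot-time-only setup is annotated __init so its text can be reclaimed
 * after initialization; functions called only from __init code should be
 * annotated the same way, which is what the patch does for swiotlb_init()
 * and pci_swiotlb_init().
 */
static int __init example_setup(void)
{
	printk(KERN_INFO "reserving %lu bytes for a bounce pool\n",
	       example_pool_bytes);
	example_report(0);	/* dummy bus address, illustration only */
	return 0;
}
core_initcall(example_setup);
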
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = NULL,
 };
 
-void pci_swiotlb_init(void)
+void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
...
 /*
  * Dynamic DMA mapping support.
  *
- * This implementation is for IA-64 and EM64T platforms that do not support
+ * This implementation is a fallback for platforms that do not support
  * I/O TLBs (aka DMA address translation hardware).
  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -129,23 +129,25 @@ __setup("swiotlb=", setup_io_tlb_npages);
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void
-swiotlb_init_with_default_size (size_t default_size)
+void __init
+swiotlb_init_with_default_size(size_t default_size)
 {
-	unsigned long i;
+	unsigned long i, bytes;
 
 	if (!io_tlb_nslabs) {
 		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
 		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
 
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	io_tlb_end = io_tlb_start + bytes;
 
 	/*
 	 * Allocate and initialize the free list array.  This array is used
@@ -162,12 +164,15 @@ swiotlb_init_with_default_size (size_t default_size)
 	 * Get the overflow emergency buffer
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	if (!io_tlb_overflow_buffer)
+		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
 	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }
 
-void
-swiotlb_init (void)
+void __init
+swiotlb_init(void)
 {
 	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
@@ -178,9 +183,9 @@ swiotlb_init (void)
  * This should be just like above, but with some error catching.
  */
 int
-swiotlb_late_init_with_default_size (size_t default_size)
+swiotlb_late_init_with_default_size(size_t default_size)
 {
-	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
 	unsigned int order;
 
 	if (!io_tlb_nslabs) {
@@ -191,8 +196,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
 	io_tlb_nslabs = SLABS_PER_PAGE << order;
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
 		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +211,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	if (!io_tlb_start)
 		goto cleanup1;
 
-	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+	if (order != get_order(bytes)) {
 		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
 		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
 		io_tlb_nslabs = SLABS_PER_PAGE << order;
+		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	}
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
-	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_end = io_tlb_start + bytes;
+	memset(io_tlb_start, 0, bytes);
 
 	/*
 	 * Allocate and initialize the free list array.  This array is used
@@ -242,8 +249,8 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		goto cleanup4;
 
-	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
-	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", bytes >> 20,
 	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 
 	return 0;
@@ -256,8 +263,8 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 	                                                 sizeof(int)));
 	io_tlb_list = NULL;
-	io_tlb_end = NULL;
 cleanup2:
+	io_tlb_end = NULL;
 	free_pages((unsigned long)io_tlb_start, order);
 	io_tlb_start = NULL;
 cleanup1:
@@ -433,7 +440,7 @@ void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       dma_addr_t *dma_handle, gfp_t flags)
 {
-	unsigned long dev_addr;
+	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
 
@@ -473,8 +480,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	/* Confirm address can be DMA'd by device */
 	if (address_needs_mapping(hwdev, dev_addr)) {
-		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
-		       (unsigned long long)*hwdev->dma_mask, dev_addr);
+		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dev_addr);
 		panic("swiotlb_alloc_coherent: allocated memory is out of "
 		      "range for device");
 	}
@@ -504,7 +512,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");
 
 	if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +533,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_bus(ptr);
+	dma_addr_t dev_addr = virt_to_bus(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -669,7 +677,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		int dir)
 {
 	void *addr;
-	unsigned long dev_addr;
+	dma_addr_t dev_addr;
 	int i;
 
 	BUG_ON(dir == DMA_NONE);
@@ -765,7 +773,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
  * this function.
  */
 int
-swiotlb_dma_supported (struct device *hwdev, u64 mask)
+swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
 	return virt_to_bus(io_tlb_end - 1) <= mask;
 }
...
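
A brief note on the format-string adjustments in the hunks above: `size_t` quantities are printed with `%zu`, and `dma_addr_t` values are widened to `unsigned long long` to match `%Lx`, since both widths vary across configurations. A minimal, hypothetical helper (not part of the patch; the name example_log_mapping is illustrative) showing the same convention:

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only -- not part of the patch. */
void example_log_mapping(size_t size, dma_addr_t dev_addr)
{
	printk(KERN_INFO "mapped %zu bytes at bus address 0x%016Lx\n",
	       size, (unsigned long long)dev_addr);
}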