Commit 93fbff63 authored by Jan Beulich, committed by Tony Luck

[IA64] make swiotlb use bus_to_virt/virt_to_bus

Convert all phys_to_virt/virt_to_phys uses to bus_to_virt/virt_to_bus, as that
is what is meant here and what is needed in (at least) some virtualized
environments such as Xen.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent cde14bbf
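The distinction matters because on bare metal the kernel's physical addresses
and the addresses a device must use for DMA coincide, whereas in a
paravirtualized guest such as Xen a guest-"physical" page can live in an
arbitrary machine frame, recorded in a physical-to-machine (p2m) table. Below
is a minimal user-space sketch of that idea; the p2m contents and helper name
are invented for illustration and are not the kernel's API.

/*
 * Illustration only -- not kernel code.  virt_to_phys() yields the
 * guest-physical address; a device doing DMA needs the machine (bus)
 * address, which is what virt_to_bus() must return.  The p2m table
 * contents below are made up for the example.
 */
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical guest: guest frame N lives in machine frame p2m[N]. */
static const uint64_t p2m[] = { 0x500, 0x1a3, 0x077, 0x9c2 };

static uint64_t example_phys_to_bus(uint64_t phys)
{
	uint64_t frame  = phys >> PAGE_SHIFT;      /* guest frame number */
	uint64_t offset = phys & (PAGE_SIZE - 1);  /* offset within page */

	/* The device must be handed the machine (bus) address. */
	return (p2m[frame] << PAGE_SHIFT) | offset;
}

int main(void)
{
	uint64_t phys = (2UL << PAGE_SHIFT) | 0x10;

	/* Prints: phys 0x2010 -> bus 0x77010 -- the two differ. */
	printf("phys 0x%" PRIx64 " -> bus 0x%" PRIx64 "\n",
	       phys, example_phys_to_bus(phys));
	return 0;
}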
@@ -36,7 +36,7 @@
 			    ( (val) & ( (align) - 1)))

 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))

 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -163,7 +163,7 @@ swiotlb_init_with_default_size (size_t default_size)
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);

 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }

 void
@@ -244,7 +244,7 @@ swiotlb_late_init_with_default_size (size_t default_size)
 	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
 	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
-	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));

 	return 0;
@@ -445,7 +445,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		flags |= GFP_DMA;
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -465,11 +465,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		if (swiotlb_dma_mapping_error(handle))
 			return NULL;

-		ret = phys_to_virt(handle);
+		ret = bus_to_virt(handle);
 	}

 	memset(ret, 0, size);
-	dev_addr = virt_to_phys(ret);
+	dev_addr = virt_to_bus(ret);

 	/* Confirm address can be DMA'd by device */
 	if (address_needs_mapping(hwdev, dev_addr)) {
@@ -525,7 +525,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_phys(ptr);
+	unsigned long dev_addr = virt_to_bus(ptr);
 	void *map;

 	BUG_ON(dir == DMA_NONE);
@@ -546,7 +546,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 		map = io_tlb_overflow_buffer;
 	}

-	dev_addr = virt_to_phys(map);
+	dev_addr = virt_to_bus(map);

 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -569,7 +569,7 @@ void
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 		     int dir)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);

 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -592,7 +592,7 @@ static inline void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	char *dma_addr = bus_to_virt(dev_addr);

 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -623,7 +623,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = phys_to_virt(dev_addr) + offset;
+	char *dma_addr = bus_to_virt(dev_addr) + offset;

 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -676,7 +676,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_phys(addr);
+		dev_addr = virt_to_bus(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
@@ -709,7 +709,8 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+			unmap_single(hwdev, bus_to_virt(sg->dma_address),
+				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
@@ -731,7 +732,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address,
+			sync_single(hwdev, bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
@@ -754,7 +755,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
-	return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }

 /*
@@ -766,7 +767,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-	return virt_to_phys(io_tlb_end - 1) <= mask;
+	return virt_to_bus(io_tlb_end - 1) <= mask;
 }

 EXPORT_SYMBOL(swiotlb_init);
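Note that after this change the error cookie compared in
swiotlb_dma_mapping_error() is the overflow buffer's bus address, so the
usual check-before-use pattern on the returned handle is unchanged. A hedged
sketch of that calling pattern follows, matching the signatures shown in this
diff; hwdev, buf, len and the function example_dma_roundtrip are placeholders,
and the swiotlb prototypes are assumed to be in scope via the architecture's
DMA headers.

/*
 * Sketch of the calling pattern implied by the interfaces above.
 * swiotlb_map_single(), swiotlb_dma_mapping_error() and
 * swiotlb_unmap_single() are the functions touched by this patch;
 * everything else here is placeholder.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_dma_roundtrip(struct device *hwdev, void *buf, size_t len)
{
	dma_addr_t handle = swiotlb_map_single(hwdev, buf, len, DMA_TO_DEVICE);

	/* The error cookie is now a bus address, so this comparison
	 * remains valid in a Xen-style guest. */
	if (swiotlb_dma_mapping_error(handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device and run the transfer ... */

	swiotlb_unmap_single(hwdev, handle, len, DMA_TO_DEVICE);
	return 0;
}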