Commit d4df33b0 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "One compiler fix, and a bug-fix in swiotlb_nr_tbl() and
  swiotlb_max_segment() to check also for no_iotlb_memory"

* 'for-linus-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: fix phys_addr_t overflow warning
  swiotlb: Return consistent SWIOTLB segments/nr_tbl
  swiotlb: Group identical cleanup in swiotlb_cleanup()
parents 366a4e38 8492101e
@@ -402,7 +402,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir, map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
attrs); attrs);
if (map == DMA_MAPPING_ERROR) if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
dev_addr = xen_phys_to_bus(map); dev_addr = xen_phys_to_bus(map);
......
@@ -129,15 +129,17 @@ setup_io_tlb_npages(char *str)
} }
early_param("swiotlb", setup_io_tlb_npages); early_param("swiotlb", setup_io_tlb_npages);
static bool no_iotlb_memory;
unsigned long swiotlb_nr_tbl(void) unsigned long swiotlb_nr_tbl(void)
{ {
return io_tlb_nslabs; return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs;
} }
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
unsigned int swiotlb_max_segment(void) unsigned int swiotlb_max_segment(void)
{ {
return max_segment; return unlikely(no_iotlb_memory) ? 0 : max_segment;
} }
EXPORT_SYMBOL_GPL(swiotlb_max_segment); EXPORT_SYMBOL_GPL(swiotlb_max_segment);
@@ -160,8 +162,6 @@ unsigned long swiotlb_size_or_default(void)
return size ? size : (IO_TLB_DEFAULT_SIZE); return size ? size : (IO_TLB_DEFAULT_SIZE);
} }
static bool no_iotlb_memory;
void swiotlb_print_info(void) void swiotlb_print_info(void)
{ {
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
@@ -317,6 +317,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
return rc; return rc;
} }
/*
 * Reset the swiotlb bookkeeping globals to zero.  Shared by the
 * late-init failure path (cleanup3) and swiotlb_exit(), replacing the
 * identical inline resets that previously appeared in both places.
 */
static void swiotlb_cleanup(void)
{
	max_segment = 0;
	io_tlb_nslabs = 0;
	io_tlb_start = 0;
	io_tlb_end = 0;
}
int int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{ {
@@ -367,10 +375,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
sizeof(int))); sizeof(int)));
io_tlb_list = NULL; io_tlb_list = NULL;
cleanup3: cleanup3:
io_tlb_end = 0; swiotlb_cleanup();
io_tlb_start = 0;
io_tlb_nslabs = 0;
max_segment = 0;
return -ENOMEM; return -ENOMEM;
} }
@@ -394,10 +399,7 @@ void __init swiotlb_exit(void)
memblock_free_late(io_tlb_start, memblock_free_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
} }
io_tlb_start = 0; swiotlb_cleanup();
io_tlb_end = 0;
io_tlb_nslabs = 0;
max_segment = 0;
} }
/* /*
@@ -546,7 +548,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
size, io_tlb_nslabs, tmp_io_tlb_used); size, io_tlb_nslabs, tmp_io_tlb_used);
return DMA_MAPPING_ERROR; return (phys_addr_t)DMA_MAPPING_ERROR;
found: found:
io_tlb_used += nslots; io_tlb_used += nslots;
spin_unlock_irqrestore(&io_tlb_lock, flags); spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -664,7 +666,7 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
/* Oh well, have to allocate and map a bounce buffer. */ /* Oh well, have to allocate and map a bounce buffer. */
*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start), *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
*phys, size, dir, attrs); *phys, size, dir, attrs);
if (*phys == DMA_MAPPING_ERROR) if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
return false; return false;
/* Ensure that the address returned is DMA'ble */ /* Ensure that the address returned is DMA'ble */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment