Commit 864ad046 authored by Linus Torvalds

Merge tag 'dma-mapping-6.9-2024-03-24' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "This has a set of swiotlb alignment fixes for sometimes very long
  standing bugs from Will. We've been discussion them for a while and
  they should be solid now"

* tag 'dma-mapping-6.9-2024-03-24' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: Reinstate page-alignment for mappings >= PAGE_SIZE
  iommu/dma: Force swiotlb_max_mapping_size on an untrusted device
  swiotlb: Fix alignment checks when both allocation and DMA masks are present
  swiotlb: Honour dma_alloc_coherent() alignment in swiotlb_alloc()
  swiotlb: Enforce page alignment in swiotlb_alloc()
  swiotlb: Fix double-allocation of slots due to broken alignment handling
parents 70293240 14cebf68
@@ -1711,6 +1711,14 @@ static size_t iommu_dma_opt_mapping_size(void)
 	return iova_rcache_range();
 }
 
+static size_t iommu_dma_max_mapping_size(struct device *dev)
+{
+	if (dev_is_untrusted(dev))
+		return swiotlb_max_mapping_size(dev);
+
+	return SIZE_MAX;
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
 	.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
 	.alloc = iommu_dma_alloc,
@@ -1733,6 +1741,7 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.unmap_resource = iommu_dma_unmap_resource,
 	.get_merge_boundary = iommu_dma_get_merge_boundary,
 	.opt_mapping_size = iommu_dma_opt_mapping_size,
+	.max_mapping_size = iommu_dma_max_mapping_size,
 };
 
 /*
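With the new .max_mapping_size callback, dma_max_mapping_size() on an untrusted device now reports the swiotlb bounce-buffer limit rather than SIZE_MAX, so callers that honour it will no longer build a single mapping the bounce buffer cannot satisfy. A minimal sketch of a consumer; foo_limit_segment() is a hypothetical helper, not part of this series:

/* Sketch only: clamp a driver's segment size to what the DMA layer can map. */
#include <linux/dma-mapping.h>
#include <linux/minmax.h>

static size_t foo_limit_segment(struct device *dev, size_t want)
{
	/* For untrusted devices this now reflects swiotlb_max_mapping_size(). */
	return min_t(size_t, want, dma_max_mapping_size(dev));
}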
@@ -1003,8 +1003,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	dma_addr_t tbl_dma_addr =
 		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
 	unsigned long max_slots = get_max_slots(boundary_mask);
-	unsigned int iotlb_align_mask =
-		dma_get_min_align_mask(dev) | alloc_align_mask;
+	unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
 	unsigned int nslots = nr_slots(alloc_size), stride;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 	unsigned int index, slots_checked, count = 0, i;
@@ -1016,18 +1015,29 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	BUG_ON(area_index >= pool->nareas);
 
 	/*
-	 * For allocations of PAGE_SIZE or larger only look for page aligned
-	 * allocations.
+	 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
+	 * page-aligned in the absence of any other alignment requirements.
+	 * 'alloc_align_mask' was later introduced to specify the alignment
+	 * explicitly, however this is passed as zero for streaming mappings
+	 * and so we preserve the old behaviour there in case any drivers are
+	 * relying on it.
 	 */
-	if (alloc_size >= PAGE_SIZE)
-		iotlb_align_mask |= ~PAGE_MASK;
-	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
+	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
+		alloc_align_mask = PAGE_SIZE - 1;
+
+	/*
+	 * Ensure that the allocation is at least slot-aligned and update
+	 * 'iotlb_align_mask' to ignore bits that will be preserved when
+	 * offsetting into the allocation.
+	 */
+	alloc_align_mask |= (IO_TLB_SIZE - 1);
+	iotlb_align_mask &= ~alloc_align_mask;
 
 	/*
 	 * For mappings with an alignment requirement don't bother looping to
 	 * unaligned slots once we found an aligned one.
 	 */
-	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
+	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
 
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > pool->area_nslabs - area->used))
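As a rough illustration of the reworked mask handling above, here is a standalone userspace sketch (not kernel code) that reproduces the stride calculation under assumed example values: 2 KiB swiotlb slots, 4 KiB pages, a streaming mapping (alloc_align_mask passed as zero) and a device min_align_mask covering the low 12 bits:

/*
 * Standalone sketch (userspace approximation, not kernel code) of the new
 * alignment handling in swiotlb_search_pool_area().
 */
#include <stdio.h>

#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)
#define PAGE_SIZE	4096UL

static unsigned long get_max_slots(unsigned long mask)
{
	return (mask >> IO_TLB_SHIFT) + 1;
}

int main(void)
{
	unsigned long alloc_align_mask = 0;		/* streaming mapping */
	unsigned long iotlb_align_mask = PAGE_SIZE - 1;	/* dma_get_min_align_mask() */
	unsigned long alloc_size = 8192;
	unsigned long stride;

	/* Historic page alignment only when no other requirement is given. */
	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
		alloc_align_mask = PAGE_SIZE - 1;

	/* At least slot-aligned; ignore bits preserved by the offset. */
	alloc_align_mask |= IO_TLB_SIZE - 1;
	iotlb_align_mask &= ~alloc_align_mask;

	stride = get_max_slots(alloc_align_mask > iotlb_align_mask ?
			       alloc_align_mask : iotlb_align_mask);

	/* Prints: alloc_align_mask=0x7ff iotlb_align_mask=0x800 stride=2 */
	printf("alloc_align_mask=%#lx iotlb_align_mask=%#lx stride=%lu\n",
	       alloc_align_mask, iotlb_align_mask, stride);
	return 0;
}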
@@ -1037,11 +1047,14 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	index = area->index;
 
 	for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
+		phys_addr_t tlb_addr;
+
 		slot_index = slot_base + index;
+		tlb_addr = slot_addr(tbl_dma_addr, slot_index);
 
-		if (orig_addr &&
-		    (slot_addr(tbl_dma_addr, slot_index) &
-		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
+		if ((tlb_addr & alloc_align_mask) ||
+		    (orig_addr && (tlb_addr & iotlb_align_mask) !=
+				  (orig_addr & iotlb_align_mask))) {
 			index = wrap_area_index(pool, index + 1);
 			slots_checked++;
 			continue;
@@ -1677,16 +1690,24 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	struct io_tlb_pool *pool;
 	phys_addr_t tlb_addr;
+	unsigned int align;
 	int index;
 
 	if (!mem)
 		return NULL;
 
-	index = swiotlb_find_slots(dev, 0, size, 0, &pool);
+	align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
+	index = swiotlb_find_slots(dev, 0, size, align, &pool);
 	if (index == -1)
 		return NULL;
 
 	tlb_addr = slot_addr(pool->start, index);
+	if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
+		dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
+			      &tlb_addr);
+		swiotlb_release_slots(dev, tlb_addr);
+		return NULL;
+	}
 
 	return pfn_to_page(PFN_DOWN(tlb_addr));
 }
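swiotlb_alloc() now asks swiotlb_find_slots() for an allocation aligned to its own rounded-up size, and since that alignment is always at least PAGE_SIZE - 1, the PAGE_ALIGNED() warning above should only fire if the slot search ever returns a misaligned buffer. A small sketch of the arithmetic, assuming 4 KiB pages and a simplified stand-in for get_order() (not kernel code):

/*
 * Sketch only: how the new 'align' mask in swiotlb_alloc() scales with the
 * allocation size (assumes PAGE_SHIFT = 12, i.e. 4 KiB pages).
 */
#include <stdio.h>

#define PAGE_SHIFT	12

/* crude stand-in for the kernel's get_order() */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 8192, 65536 };

	for (int i = 0; i < 3; i++) {
		unsigned long align = (1UL << (get_order(sizes[i]) + PAGE_SHIFT)) - 1;

		/* e.g. size 8192 -> order 1 -> align 0x1fff (8 KiB alignment) */
		printf("size %lu -> align mask %#lx\n", sizes[i], align);
	}
	return 0;
}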