Commit e81e99ba authored by David Stevens, committed by Joerg Roedel

swiotlb: Support aligned swiotlb buffers

Add an argument to swiotlb_tbl_map_single that specifies the desired
alignment of the allocated buffer. This is used by dma-iommu to ensure
the buffer is aligned to the iova granule size when using swiotlb with
untrusted sub-granule mappings. This addresses an issue where adjacent
slots could be exposed to the untrusted device if IO_TLB_SIZE < iova
granule < PAGE_SIZE.
Signed-off-by: David Stevens <stevensd@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20210929023300.335969-7-stevensd@google.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2e727bff
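
The key change is in swiotlb_find_slots(): the slot-search stride is now rounded up so the start of the bounce buffer satisfies the caller's alloc_align_mask. The user-space sketch below (not part of the patch) mirrors just that stride computation; IO_TLB_SHIFT, PAGE_SHIFT, and the 16 KiB iova granule are illustrative assumptions, not values taken from this commit.

/* Illustrative only: mirrors the stride logic in swiotlb_find_slots()
 * after this patch.  Constants are example values, not kernel imports. */
#include <stdio.h>
#include <stddef.h>

#define IO_TLB_SHIFT 11U                 /* 2 KiB swiotlb slot (example) */
#define PAGE_SHIFT   12U                 /* 4 KiB page (example) */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static unsigned int slot_stride(unsigned int iotlb_align_mask,
				unsigned int alloc_align_mask,
				size_t alloc_size)
{
	unsigned int stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;

	if (alloc_size >= PAGE_SIZE)
		stride = max_u(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	/* New with this commit: never step on a grid finer than the
	 * requested allocation alignment. */
	stride = max_u(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
	return stride;
}

int main(void)
{
	unsigned int granule_mask = (16U << 10) - 1;  /* 16 KiB iova granule */

	/* No alignment request: a page-sized allocation searches in
	 * 2-slot steps. */
	printf("no alignment mask     : %u slots\n", slot_stride(0, 0, 4096));
	/* Passing iova_mask(iovad) for a 16 KiB granule widens the step to
	 * 8 slots, so the search only considers granule-aligned starts and
	 * adjacent slots are not exposed to the untrusted device. */
	printf("16 KiB granule aligned: %u slots\n",
	       slot_stride(0, granule_mask, 4096));
	return 0;
}
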
@@ -818,8 +818,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		size_t padding_size;
 
 		aligned_size = iova_align(iovad, size);
-		phys = swiotlb_tbl_map_single(dev, phys, size,
-					      aligned_size, dir, attrs);
+		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+					      iova_mask(iovad), dir, attrs);
 
 		if (phys == DMA_MAPPING_ERROR)
 			return DMA_MAPPING_ERROR;

@@ -380,7 +380,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
+	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;

@@ -45,7 +45,8 @@ extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
 		size_t mapping_size, size_t alloc_size,
-		enum dma_data_direction dir, unsigned long attrs);
+		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
+		unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,

@@ -459,7 +459,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
  * allocate a buffer from that IO TLB pool.
  */
 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
-		size_t alloc_size)
+		size_t alloc_size, unsigned int alloc_align_mask)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	unsigned long boundary_mask = dma_get_seg_boundary(dev);

@@ -483,6 +483,7 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
 	if (alloc_size >= PAGE_SIZE)
 		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
+	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
 
 	spin_lock_irqsave(&mem->lock, flags);
 	if (unlikely(nslots > mem->nslabs - mem->used))

@@ -541,7 +542,8 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 
 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 		size_t mapping_size, size_t alloc_size,
-		enum dma_data_direction dir, unsigned long attrs)
+		unsigned int alloc_align_mask, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);

@@ -561,7 +563,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 		return (phys_addr_t)DMA_MAPPING_ERROR;
 	}
 
-	index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset);
+	index = swiotlb_find_slots(dev, orig_addr,
+				   alloc_size + offset, alloc_align_mask);
 	if (index == -1) {
 		if (!(attrs & DMA_ATTR_NO_WARN))
 			dev_warn_ratelimited(dev,

@@ -675,7 +678,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
 			      swiotlb_force);
 
-	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
 			attrs);
 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;

@@ -759,7 +762,7 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
 	if (!mem)
 		return NULL;
 
-	index = swiotlb_find_slots(dev, 0, size);
+	index = swiotlb_find_slots(dev, 0, size, 0);
 	if (index == -1)
 		return NULL;