Commit 1d8b0e79 authored by Linus Torvalds

Merge tag 'iommu-fixes-v4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:

 - Two build issues, one in the ipmmu-vmsa driver and one for the new
   generic dma-api implementation used on arm64

 - A performance fix for said dma-api implementation

 - An issue caused by a wrong offset in map_sg in the same code as above

* tag 'iommu-fixes-v4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/dma: Use correct offset in map_sg
  iommu/ipmmu-vmsa: Don't truncate ttbr if LPAE is not enabled
  iommu/dma: Avoid unlikely high-order allocations
  iommu/dma: Add some missing #includes
parents 2626820d 164afb1d
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,10 +21,13 @@
 
 #include <linux/device.h>
 #include <linux/dma-iommu.h>
+#include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
 #include <linux/iova.h>
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
 
 int iommu_dma_init(void)
 {
@@ -191,6 +194,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 {
         struct page **pages;
         unsigned int i = 0, array_size = count * sizeof(*pages);
+        unsigned int order = MAX_ORDER;
 
         if (array_size <= PAGE_SIZE)
                 pages = kzalloc(array_size, GFP_KERNEL);
@@ -204,14 +208,15 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
 
         while (count) {
                 struct page *page = NULL;
-                int j, order = __fls(count);
+                int j;
 
                 /*
                  * Higher-order allocations are a convenience rather
                  * than a necessity, hence using __GFP_NORETRY until
                  * falling back to single-page allocations.
                  */
-                for (order = min(order, MAX_ORDER); order > 0; order--) {
+                for (order = min_t(unsigned int, order, __fls(count));
+                     order > 0; order--) {
                         page = alloc_pages(gfp | __GFP_NORETRY, order);
                         if (!page)
                                 continue;
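
With order now declared once at the top of __iommu_dma_alloc_pages() (the -191 hunk above), each chunk's starting order is capped by the best order seen so far, so an order the allocator has already refused is never retried for later chunks. A minimal userspace sketch of that descending-order fallback, assuming a made-up try_alloc_block() in place of alloc_pages() and a fixed maximum order of 11:

    /* Sketch only: try_alloc_block() stands in for alloc_pages(); it is not
     * a kernel interface.  Pretend the system can satisfy at most order-3
     * (8-page) contiguous allocations. */
    #include <stdbool.h>
    #include <stdio.h>

    #define SKETCH_MAX_ORDER 11

    static bool try_alloc_block(unsigned int order)
    {
            return order <= 3;
    }

    static unsigned int fls_sketch(unsigned int x)
    {
            unsigned int r = 0;

            while (x >>= 1)             /* index of the highest set bit */
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int count = 1000;              /* pages still needed     */
            unsigned int order = SKETCH_MAX_ORDER;  /* persists across chunks */

            while (count) {
                    unsigned int limit = fls_sketch(count);

                    /* never start above the last order that worked */
                    for (order = (order < limit) ? order : limit; order > 0; order--)
                            if (try_alloc_block(order))
                                    break;
                    /* order == 0 here means fall back to single pages */
                    printf("order-%u chunk: %u page(s)\n", order, 1u << order);
                    count -= 1u << order;
            }
            return 0;
    }

The old code reset order to __fls(count) on every pass, so every chunk re-attempted the same unlikely high-order allocations; carrying order across iterations is what makes this a performance fix.
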
@@ -453,7 +458,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                 size_t s_offset = iova_offset(iovad, s->offset);
                 size_t s_length = s->length;
 
-                sg_dma_address(s) = s->offset;
+                sg_dma_address(s) = s_offset;
                 sg_dma_len(s) = s_length;
                 s->offset -= s_offset;
                 s_length = iova_align(iovad, s_length + s_offset);
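
The hunk above stashes s_offset, the offset within the IOVA granule, instead of s->offset, the offset within the CPU page; when the IOMMU granule is smaller than the CPU page the two can differ by whole granules, and only the sub-granule remainder may be added back to the IOVA once the aligned region has been mapped. A worked example with illustrative values (64KiB CPU page, 4KiB IOVA granule), assuming iova_offset() simply masks with granule - 1:

    #include <stdio.h>

    int main(void)
    {
            unsigned long granule   = 0x1000;       /* 4KiB IOVA granule              */
            unsigned long sg_offset = 0x2345;       /* buffer offset in a 64KiB page  */
            unsigned long iova      = 0xf0000000;   /* IOVA of the aligned mapping    */

            /* iova_offset(): keep only the sub-granule part of the offset */
            unsigned long s_offset = sg_offset & (granule - 1);     /* 0x345 */

            /* The mapping itself starts at the granule-aligned address, so the
             * granule-sized part of the offset is already accounted for. */
            printf("fixed:  0x%lx\n", iova + s_offset);     /* 0xf0000345 */
            printf("broken: 0x%lx\n", iova + sg_offset);    /* 0xf0002345, 8KiB past the data */
            return 0;
    }
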
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -295,7 +295,7 @@ static struct iommu_gather_ops ipmmu_gather_ops = {
 
 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 {
-        phys_addr_t ttbr;
+        u64 ttbr;
 
         /*
          * Allocate the page table operations.
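
The ttbr variable holds the 64-bit translation table base value that is later split between the lower and upper TTBR registers; with LPAE disabled, phys_addr_t is only 32 bits wide on ARM, so the value gets truncated and extracting the upper word upsets the compiler, which is the ipmmu-vmsa build issue mentioned in the summary. A small sketch of the truncation, assuming a 32-bit phys_addr_t as on non-LPAE ARM:

    #include <stdint.h>
    #include <stdio.h>

    /* assumption: a 32-bit phys_addr_t, as on ARM without LPAE */
    typedef uint32_t narrow_phys_addr_t;

    int main(void)
    {
            uint64_t ttbr = 0x100004000ULL;         /* table located above 4GiB */

            narrow_phys_addr_t truncated = (narrow_phys_addr_t)ttbr;
            uint64_t kept = ttbr;

            printf("phys_addr_t: 0x%08x           (upper word lost)\n",
                   (unsigned int)truncated);
            printf("u64:         0x%016llx\n", (unsigned long long)kept);
            printf("upper word:  0x%08x\n", (unsigned int)(kept >> 32));
            return 0;
    }
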