Commit 13fa14e1 authored by David S. Miller, committed by David S. Miller

[SPARC64]: Add SG merging support back into IOMMU code.

Mimics almost perfectly the powerpc IOMMU code, except that it doesn't
have the IOMMU_PAGE_SIZE != PAGE_SIZE handling, and it also lacks the
device DMA mask support bits.

I'll add that later as time permits, but this gets us at least back to
where we were beforehand.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d284142c
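
For readers skimming the diff, here is a minimal, self-contained sketch of the merge rule the new map_sg paths apply: two consecutive scatterlist entries collapse into one DMA segment when the second allocation's bus address starts exactly where the first ends and the combined length stays within the device's maximum segment size. This is illustrative only, not part of the patch; names such as struct seg and try_merge are invented for the example.

/* Illustrative sketch only -- not the kernel code below.  Models the
 * merge test used by the new dma_4u_map_sg()/dma_4v_map_sg() loops:
 * extend the previous DMA segment when the next allocation is
 * bus-contiguous and the merged length fits dma_get_max_seg_size().
 * "struct seg" and "try_merge" are invented names for this example.
 */
#include <stddef.h>
#include <stdint.h>

struct seg {
	uint64_t dma_addr;	/* bus address programmed into the IOMMU */
	size_t   len;		/* length of the mapped DMA segment */
};

/* Returns 1 and grows *prev if *next can be merged into it,
 * otherwise returns 0 and the caller starts a new segment. */
static int try_merge(struct seg *prev, const struct seg *next,
		     size_t max_seg_size)
{
	uint64_t dma_next = prev->dma_addr + prev->len;	/* end of prev */

	if (next->dma_addr != dma_next ||
	    prev->len + next->len > max_seg_size)
		return 0;	/* not contiguous, or would exceed the device limit */

	prev->len += next->len;
	return 1;
}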
@@ -512,124 +512,209 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			 int nelems, enum dma_data_direction direction)
 {
-	unsigned long flags, ctx, i, npages, iopte_protection;
-	struct scatterlist *sg;
+	struct scatterlist *s, *outs, *segstart;
+	unsigned long flags, handle, prot, ctx;
+	dma_addr_t dma_next = 0, dma_addr;
+	unsigned int max_seg_size;
+	int outcount, incount, i;
 	struct strbuf *strbuf;
 	struct iommu *iommu;
-	iopte_t *base;
-	u32 dma_base;
-
-	/* Fast path single entry scatterlists. */
-	if (nelems == 1) {
-		sglist->dma_address =
-			dma_4u_map_single(dev, sg_virt(sglist),
-					  sglist->length, direction);
-		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
-			return 0;
-		sglist->dma_length = sglist->length;
-		return 1;
-	}
+
+	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
 	strbuf = dev->archdata.stc;
-
-	if (unlikely(direction == DMA_NONE))
-		goto bad_no_ctx;
-
-	npages = calc_npages(sglist, nelems);
+	if (nelems == 0 || !iommu)
+		return 0;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	base = alloc_npages(dev, iommu, npages);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	if (base == NULL)
-		goto bad;
-
-	dma_base = iommu->page_table_map_base +
-		((base - iommu->page_table) << IO_PAGE_SHIFT);
-
 	if (strbuf->strbuf_enabled)
-		iopte_protection = IOPTE_STREAMING(ctx);
+		prot = IOPTE_STREAMING(ctx);
 	else
-		iopte_protection = IOPTE_CONSISTENT(ctx);
+		prot = IOPTE_CONSISTENT(ctx);
 	if (direction != DMA_TO_DEVICE)
-		iopte_protection |= IOPTE_WRITE;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-		unsigned long slen = sg->length;
-		unsigned long this_npages;
+		prot |= IOPTE_WRITE;
+
+	outs = s = segstart = &sglist[0];
+	outcount = 1;
+	incount = nelems;
+	handle = 0;
+
+	/* Init first segment length for backout at failure */
+	outs->dma_length = 0;
+
+	max_seg_size = dma_get_max_seg_size(dev);
+	for_each_sg(sglist, s, nelems, i) {
+		unsigned long paddr, npages, entry, slen;
+		iopte_t *base;
+
+		slen = s->length;
+		/* Sanity check */
+		if (slen == 0) {
+			dma_next = 0;
+			continue;
+		}
+		/* Allocate iommu entries for that segment */
+		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
+		npages = iommu_num_pages(paddr, slen);
+		entry = iommu_range_alloc(dev, iommu, npages, &handle);
+
+		/* Handle failure */
+		if (unlikely(entry == DMA_ERROR_CODE)) {
+			if (printk_ratelimit())
+				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
+				       " npages %lx\n", iommu, paddr, npages);
+			goto iommu_map_failed;
+		}
 
-		this_npages = iommu_num_pages(paddr, slen);
+		base = iommu->page_table + entry;
 
-		sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
-		sg->dma_length = slen;
+		/* Convert entry to a dma_addr_t */
+		dma_addr = iommu->page_table_map_base +
+			(entry << IO_PAGE_SHIFT);
+		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
+		/* Insert into HW table */
 		paddr &= IO_PAGE_MASK;
-		while (this_npages--) {
-			iopte_val(*base) = iopte_protection | paddr;
+		while (npages--) {
+			iopte_val(*base) = prot | paddr;
 			base++;
 			paddr += IO_PAGE_SIZE;
-			dma_base += IO_PAGE_SIZE;
 		}
+
+		/* If we are in an open segment, try merging */
+		if (segstart != s) {
+			/* We cannot merge if:
+			 * - allocated dma_addr isn't contiguous to previous allocation
+			 */
+			if ((dma_addr != dma_next) ||
+			    (outs->dma_length + s->length > max_seg_size)) {
+				/* Can't merge: create a new segment */
+				segstart = s;
+				outcount++;
+				outs = sg_next(outs);
+			} else {
+				outs->dma_length += s->length;
+			}
+		}
+
+		if (segstart == s) {
+			/* This is a new segment, fill entries */
+			outs->dma_address = dma_addr;
+			outs->dma_length = slen;
+		}
+
+		/* Calculate next page pointer for contiguous check */
+		dma_next = dma_addr + slen;
 	}
 
-	return nelems;
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	if (outcount < incount) {
+		outs = sg_next(outs);
+		outs->dma_address = DMA_ERROR_CODE;
+		outs->dma_length = 0;
+	}
+
+	return outcount;
+
+iommu_map_failed:
+	for_each_sg(sglist, s, nelems, i) {
+		if (s->dma_length != 0) {
+			unsigned long vaddr, npages, entry, i;
+			iopte_t *base;
+
+			vaddr = s->dma_address & IO_PAGE_MASK;
+			npages = iommu_num_pages(s->dma_address, s->dma_length);
+			iommu_range_free(iommu, vaddr, npages);
+
+			entry = (vaddr - iommu->page_table_map_base)
+				>> IO_PAGE_SHIFT;
+			base = iommu->page_table + entry;
+
+			for (i = 0; i < npages; i++)
+				iopte_make_dummy(iommu, base + i);
+
+			s->dma_address = DMA_ERROR_CODE;
+			s->dma_length = 0;
+		}
+		if (s == outs)
+			break;
+	}
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
-bad:
-	iommu_free_ctx(iommu, ctx);
-bad_no_ctx:
-	if (printk_ratelimit())
-		WARN_ON(1);
 	return 0;
 }
 
+/* If contexts are being used, they are the same in all of the mappings
+ * we make for a particular SG.
+ */
+static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
+{
+	unsigned long ctx = 0;
+
+	if (iommu->iommu_ctxflush) {
+		iopte_t *base;
+		u32 bus_addr;
+
+		bus_addr = sg->dma_address & IO_PAGE_MASK;
+		base = iommu->page_table +
+			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+	}
+	return ctx;
+}
+
 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction)
 {
-	unsigned long flags, ctx, i, npages;
+	unsigned long flags, ctx;
+	struct scatterlist *sg;
 	struct strbuf *strbuf;
 	struct iommu *iommu;
-	iopte_t *base;
-	u32 bus_addr;
 
-	if (unlikely(direction == DMA_NONE)) {
-		if (printk_ratelimit())
-			WARN_ON(1);
-	}
+	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
 	strbuf = dev->archdata.stc;
 
-	bus_addr = sglist->dma_address & IO_PAGE_MASK;
+	ctx = fetch_sg_ctx(iommu, sglist);
 
-	npages = calc_npages(sglist, nelems);
+	spin_lock_irqsave(&iommu->lock, flags);
 
-	base = iommu->page_table +
-		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+	sg = sglist;
+	while (nelems--) {
+		dma_addr_t dma_handle = sg->dma_address;
+		unsigned int len = sg->dma_length;
+		unsigned long npages, entry;
+		iopte_t *base;
+		int i;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+		if (!len)
+			break;
+		npages = iommu_num_pages(dma_handle, len);
+		iommu_range_free(iommu, dma_handle, npages);
 
-	/* Record the context, if any. */
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+		entry = ((dma_handle - iommu->page_table_map_base)
+			 >> IO_PAGE_SHIFT);
+		base = iommu->page_table + entry;
 
-	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled)
-		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+		dma_handle &= IO_PAGE_MASK;
+		if (strbuf->strbuf_enabled)
+			strbuf_flush(strbuf, iommu, dma_handle, ctx,
+				     npages, direction);
 
-	/* Step 2: Clear out the TSB entries. */
-	for (i = 0; i < npages; i++)
-		iopte_make_dummy(iommu, base + i);
+		for (i = 0; i < npages; i++)
+			iopte_make_dummy(iommu, base + i);
 
-	iommu_range_free(iommu, bus_addr, npages);
+		sg = sg_next(sg);
+	}
 
 	iommu_free_ctx(iommu, ctx);
...
@@ -89,6 +89,17 @@ static long iommu_batch_flush(struct iommu_batch *p)
 	return 0;
 }
 
+static inline void iommu_batch_new_entry(unsigned long entry)
+{
+	struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+
+	if (p->entry + p->npages == entry)
+		return;
+	if (p->entry != ~0UL)
+		iommu_batch_flush(p);
+	p->entry = entry;
+}
+
 /* Interrupts must be disabled.  */
 static inline long iommu_batch_add(u64 phys_page)
 {
@@ -320,88 +331,131 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			 int nelems, enum dma_data_direction direction)
 {
-	unsigned long flags, npages, i, prot;
-	u32 dma_base, orig_dma_base;
-	struct scatterlist *sg;
+	struct scatterlist *s, *outs, *segstart;
+	unsigned long flags, handle, prot;
+	dma_addr_t dma_next = 0, dma_addr;
+	unsigned int max_seg_size;
+	int outcount, incount, i;
 	struct iommu *iommu;
-	long entry, err;
-
-	/* Fast path single entry scatterlists. */
-	if (nelems == 1) {
-		sglist->dma_address =
-			dma_4v_map_single(dev, sg_virt(sglist),
-					  sglist->length, direction);
-		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
-			return 0;
-		sglist->dma_length = sglist->length;
-		return 1;
-	}
+	long err;
+
+	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
-
-	if (unlikely(direction == DMA_NONE))
-		goto bad;
-
-	npages = calc_npages(sglist, nelems);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	entry = iommu_range_alloc(dev, iommu, npages, NULL);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	if (unlikely(entry == DMA_ERROR_CODE))
-		goto bad;
-
-	orig_dma_base = dma_base = iommu->page_table_map_base +
-		(entry << IO_PAGE_SHIFT);
+	if (nelems == 0 || !iommu)
+		return 0;
 
 	prot = HV_PCI_MAP_ATTR_READ;
 	if (direction != DMA_TO_DEVICE)
 		prot |= HV_PCI_MAP_ATTR_WRITE;
 
-	local_irq_save(flags);
+	outs = s = segstart = &sglist[0];
+	outcount = 1;
+	incount = nelems;
+	handle = 0;
+
+	/* Init first segment length for backout at failure */
+	outs->dma_length = 0;
+
+	spin_lock_irqsave(&iommu->lock, flags);
 
-	iommu_batch_start(dev, prot, entry);
+	iommu_batch_start(dev, prot, ~0UL);
 
-	for_each_sg(sglist, sg, nelems, i) {
-		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-		unsigned long slen = sg->length;
-		unsigned long this_npages;
+	max_seg_size = dma_get_max_seg_size(dev);
+	for_each_sg(sglist, s, nelems, i) {
+		unsigned long paddr, npages, entry, slen;
+
+		slen = s->length;
+		/* Sanity check */
+		if (slen == 0) {
+			dma_next = 0;
+			continue;
+		}
+		/* Allocate iommu entries for that segment */
+		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
+		npages = iommu_num_pages(paddr, slen);
+		entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
-		this_npages = iommu_num_pages(paddr, slen);
+		/* Handle failure */
+		if (unlikely(entry == DMA_ERROR_CODE)) {
+			if (printk_ratelimit())
+				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
+				       " npages %lx\n", iommu, paddr, npages);
+			goto iommu_map_failed;
+		}
 
-		sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
-		sg->dma_length = slen;
+		iommu_batch_new_entry(entry);
 
+		/* Convert entry to a dma_addr_t */
+		dma_addr = iommu->page_table_map_base +
+			(entry << IO_PAGE_SHIFT);
+		dma_addr |= (s->offset & ~IO_PAGE_MASK);
+
+		/* Insert into HW table */
 		paddr &= IO_PAGE_MASK;
-		while (this_npages--) {
+		while (npages--) {
 			err = iommu_batch_add(paddr);
-			if (unlikely(err < 0L)) {
-				local_irq_restore(flags);
+			if (unlikely(err < 0L))
 				goto iommu_map_failed;
+			paddr += IO_PAGE_SIZE;
+		}
+
+		/* If we are in an open segment, try merging */
+		if (segstart != s) {
+			/* We cannot merge if:
+			 * - allocated dma_addr isn't contiguous to previous allocation
+			 */
+			if ((dma_addr != dma_next) ||
+			    (outs->dma_length + s->length > max_seg_size)) {
+				/* Can't merge: create a new segment */
+				segstart = s;
+				outcount++;
+				outs = sg_next(outs);
+			} else {
+				outs->dma_length += s->length;
 			}
+		}
 
-			paddr += IO_PAGE_SIZE;
-			dma_base += IO_PAGE_SIZE;
+		if (segstart == s) {
+			/* This is a new segment, fill entries */
+			outs->dma_address = dma_addr;
+			outs->dma_length = slen;
 		}
+
+		/* Calculate next page pointer for contiguous check */
+		dma_next = dma_addr + slen;
 	}
 
 	err = iommu_batch_end();
 
-	local_irq_restore(flags);
-
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
 
-	return nelems;
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
-bad:
-	if (printk_ratelimit())
-		WARN_ON(1);
-	return 0;
+	if (outcount < incount) {
+		outs = sg_next(outs);
+		outs->dma_address = DMA_ERROR_CODE;
+		outs->dma_length = 0;
+	}
+
+	return outcount;
 
 iommu_map_failed:
-	spin_lock_irqsave(&iommu->lock, flags);
-	iommu_range_free(iommu, orig_dma_base, npages);
+	for_each_sg(sglist, s, nelems, i) {
+		if (s->dma_length != 0) {
+			unsigned long vaddr, npages;
+
+			vaddr = s->dma_address & IO_PAGE_MASK;
+			npages = iommu_num_pages(s->dma_address, s->dma_length);
+			iommu_range_free(iommu, vaddr, npages);
+			/* XXX demap? XXX */
+			s->dma_address = DMA_ERROR_CODE;
+			s->dma_length = 0;
+		}
+		if (s == outs)
+			break;
+	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return 0;
@@ -410,39 +464,43 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction)
 {
-	unsigned long flags, npages;
 	struct pci_pbm_info *pbm;
-	u32 devhandle, bus_addr;
+	struct scatterlist *sg;
 	struct iommu *iommu;
-	long entry;
+	unsigned long flags;
+	u32 devhandle;
 
-	if (unlikely(direction == DMA_NONE)) {
-		if (printk_ratelimit())
-			WARN_ON(1);
-	}
+	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
 
-	bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-	npages = calc_npages(sglist, nelems);
-
-	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	iommu_range_free(iommu, bus_addr, npages);
-
-	do {
-		unsigned long num;
+	sg = sglist;
+	while (nelems--) {
+		dma_addr_t dma_handle = sg->dma_address;
+		unsigned int len = sg->dma_length;
+		unsigned long npages, entry;
+
+		if (!len)
+			break;
+		npages = iommu_num_pages(dma_handle, len);
+		iommu_range_free(iommu, dma_handle, npages);
+
+		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+		while (npages) {
+			unsigned long num;
+
+			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+						    npages);
+			entry += num;
+			npages -= num;
+		}
 
-		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-					    npages);
-		entry += num;
-		npages -= num;
-	} while (npages != 0);
+		sg = sg_next(sg);
+	}
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
...
@@ -16,7 +16,7 @@
 /* BIO layer definitions. */
 extern unsigned long kern_base, kern_size;
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-#define BIO_VMERGE_BOUNDARY	0
+#define BIO_VMERGE_BOUNDARY	8192
 
 static inline u8 _inb(unsigned long addr)
 {
...