Commit dda0fed9 authored by David Dillow, committed by David S. Miller

[SPARC64]: Handle SBUS dma allocations larger than 1MB.

Signed-off-by: David Dillow <dave@thedillows.org>
Signed-off-by: David S. Miller <davem@redhat.com>
parent 5e6b6426
@@ -28,10 +28,10 @@
  *
  * On SYSIO, using an 8K page size we have 1GB of SBUS
  * DMA space mapped. We divide this space into equally
- * sized clusters. Currently we allow clusters up to a
- * size of 1MB. If anything begins to generate DMA
- * mapping requests larger than this we will need to
- * increase things a bit.
+ * sized clusters. We allocate a DMA mapping from the
+ * cluster that matches the order of the allocation, or
+ * if the order is greater than the number of clusters,
+ * we try to allocate from the last cluster.
  */
 
 #define NCLUSTERS	8UL
@@ -134,12 +134,17 @@ static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npage
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
 {
-	iopte_t *iopte, *limit, *first;
-	unsigned long cnum, ent, flush_point;
+	iopte_t *iopte, *limit, *first, *cluster;
+	unsigned long cnum, ent, nent, flush_point, found;
 
 	cnum = 0;
+	nent = 1;
 	while ((1UL << cnum) < npages)
 		cnum++;
+	if(cnum >= NCLUSTERS) {
+		nent = 1UL << (cnum - NCLUSTERS);
+		cnum = NCLUSTERS - 1;
+	}
 	iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
 
 	if (cnum == 0)
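A quick user-space trace of the new order-to-cluster mapping may help here. This is a minimal sketch, not kernel code: npages_to_cluster is a hypothetical helper name, and the only assumption carried over from the patch is NCLUSTERS = 8UL with allocation order computed from the page count.

#include <stdio.h>

#define NCLUSTERS 8UL

/* Illustrative only: mirror the patch's mapping from a request size in
 * IOMMU pages to (cluster index, number of cluster entries). Orders that
 * do not fit any single cluster fall back to several entries of the
 * last cluster. */
static void npages_to_cluster(unsigned long npages,
			      unsigned long *cnum, unsigned long *nent)
{
	*cnum = 0;
	*nent = 1;
	while ((1UL << *cnum) < npages)
		(*cnum)++;
	if (*cnum >= NCLUSTERS) {
		*nent = 1UL << (*cnum - NCLUSTERS);
		*cnum = NCLUSTERS - 1;
	}
}

int main(void)
{
	unsigned long sizes[] = { 16UL, 128UL, 512UL };	/* in IOMMU pages */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long cnum, nent;

		npages_to_cluster(sizes[i], &cnum, &nent);
		printf("%lu pages -> cluster %lu, %lu entries\n",
		       sizes[i], cnum, nent);
	}
	return 0;
}

For example, a 512-page request has order 9, which exceeds the largest single cluster, so it maps to cluster 7 with nent = 2.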
@@ -152,22 +157,31 @@ static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long
 	flush_point = iommu->alloc_info[cnum].flush;
 
 	first = iopte;
+	cluster = NULL;
+	found = 0;
 	for (;;) {
 		if (iopte_val(*iopte) == 0UL) {
-			if ((iopte + (1 << cnum)) >= limit)
-				ent = 0;
-			else
-				ent = ent + 1;
-			iommu->alloc_info[cnum].next = ent;
-			if (ent == flush_point)
-				__iommu_flushall(iommu);
-			break;
+			found++;
+			if (!cluster)
+				cluster = iopte;
+		} else {
+			/* Used cluster in the way */
+			cluster = NULL;
+			found = 0;
 		}
+
+		if (found == nent)
+			break;
+
 		iopte += (1 << cnum);
 		ent++;
 		if (iopte >= limit) {
			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
 			ent = 0;
+
+			/* Multiple cluster allocations must not wrap */
+			cluster = NULL;
+			found = 0;
 		}
 		if (ent == flush_point)
 			__iommu_flushall(iommu);
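The loop above now searches one cluster for nent consecutive free entries, restarting the run whenever a used entry is hit and refusing to let a multi-entry allocation wrap past the end of the cluster. A stand-alone sketch of that search pattern over a plain array, with hypothetical names (this is not the kernel routine):

#include <stddef.h>

/* Illustrative only: find `nent` consecutive zero slots in a table of
 * `total` slots, scanning circularly from `start`, but never allowing a
 * single run to wrap past the end of the table. Returns the index of the
 * first slot in the run, or -1 if no such run was found. */
static long find_free_run(const unsigned long *table, size_t total,
			  size_t start, size_t nent)
{
	size_t ent = start, scanned = 0, found = 0;
	long run_start = -1;

	while (scanned++ < 2 * total) {	/* bound the circular scan */
		if (table[ent] == 0UL) {
			if (run_start < 0)
				run_start = (long)ent;
			if (++found == nent)
				return run_start;
		} else {
			/* Used slot in the way: restart the run. */
			run_start = -1;
			found = 0;
		}
		if (++ent == total) {
			/* A multi-slot run must not wrap around the end. */
			ent = 0;
			run_start = -1;
			found = 0;
		}
	}
	return -1;
}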
@@ -175,8 +189,19 @@ static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long
 			goto bad;
 	}
 
+	/* ent/iopte points to the last cluster entry we're going to use,
+	 * so save our place for the next allocation.
+	 */
+	if ((iopte + (1 << cnum)) >= limit)
+		ent = 0;
+	else
+		ent = ent + 1;
+	iommu->alloc_info[cnum].next = ent;
+	if (ent == flush_point)
+		__iommu_flushall(iommu);
+
 	/* I've got your streaming cluster right here buddy boy... */
-	return iopte;
+	return cluster;
 
 bad:
 	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
@@ -186,15 +211,23 @@ static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long
 
 static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
 {
-	unsigned long cnum, ent;
+	unsigned long cnum, ent, nent;
 	iopte_t *iopte;
 
 	cnum = 0;
+	nent = 1;
 	while ((1UL << cnum) < npages)
 		cnum++;
+	if(cnum >= NCLUSTERS) {
+		nent = 1UL << (cnum - NCLUSTERS);
+		cnum = NCLUSTERS - 1;
+	}
 	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
 	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
-	iopte_val(*iopte) = 0UL;
+	do {
+		iopte_val(*iopte) = 0UL;
+		iopte += 1 << cnum;
+	} while(--nent);
 
 	/* If the global flush might not have caught this entry,
 	 * adjust the flush point such that we will flush before
...
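On the free side, the same npages to (cnum, nent) mapping is recomputed and the first iopte of each of the nent entries is cleared, stepping by the cluster's entry stride. A minimal sketch of that clearing walk over a toy table; clear_cluster_entries is a hypothetical name and the plain array stands in for the iopte page table:

/* Illustrative only: clear the first pte of each of the `nent` entries
 * reserved at allocation time, stepping by (1 << cnum) ptes per entry.
 * Assumes nent >= 1, as in the patch. */
static void clear_cluster_entries(unsigned long *table, unsigned long first,
				  unsigned long cnum, unsigned long nent)
{
	unsigned long *pte = table + first;

	do {
		*pte = 0UL;		/* mark this entry free again */
		pte += 1UL << cnum;	/* step to the next entry */
	} while (--nent);
}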