Commit decbda25 authored by Alexey Kardashevskiy, committed by Michael Ellerman

powerpc/powernv/ioda/ioda2: Rework TCE invalidation in tce_build()/tce_free()

The pnv_pci_ioda_tce_invalidate() helper invalidates TCE cache. It is
supposed to be called on IODA1/2 and not called on p5ioc2. It receives
start and end host addresses of TCE table.

IODA2 actually needs PCI addresses to invalidate the cache. Those
can be calculated from host addresses but since we are going
to implement multi-level TCE tables, calculating PCI address from
a host address might get either tricky or ugly as TCE table remains flat
on PCI bus but not in RAM.

This moves pnv_pci_ioda_tce_invalidate() from generic pnv_tce_build/
pnv_tce_free and defines IODA1/2-specific callbacks which call generic
ones and do PHB-model-specific TCE cache invalidation. P5IOC2 keeps
using generic callbacks as before.

This changes pnv_pci_ioda2_tce_invalidate() to receive the TCE index and
number of pages which are PCI addresses shifted by IOMMU page shift.

No change in behaviour is expected.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent da004c36
...@@ -1679,18 +1679,19 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, ...@@ -1679,18 +1679,19 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
} }
} }
static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe, static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
struct iommu_table *tbl, unsigned long index, unsigned long npages, bool rm)
__be64 *startp, __be64 *endp, bool rm)
{ {
struct pnv_ioda_pe *pe = tbl->data;
__be64 __iomem *invalidate = rm ? __be64 __iomem *invalidate = rm ?
(__be64 __iomem *)pe->tce_inval_reg_phys : (__be64 __iomem *)pe->tce_inval_reg_phys :
(__be64 __iomem *)tbl->it_index; (__be64 __iomem *)tbl->it_index;
unsigned long start, end, inc; unsigned long start, end, inc;
const unsigned shift = tbl->it_page_shift; const unsigned shift = tbl->it_page_shift;
start = __pa(startp); start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
end = __pa(endp); end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
npages - 1);
/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */ /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
if (tbl->it_busno) { if (tbl->it_busno) {
...@@ -1726,16 +1727,39 @@ static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe, ...@@ -1726,16 +1727,39 @@ static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
*/ */
} }
static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
attrs);
if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
return ret;
}
static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
long npages)
{
pnv_tce_free(tbl, index, npages);
if (tbl->it_type & TCE_PCI_SWINV_FREE)
pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
}
static struct iommu_table_ops pnv_ioda1_iommu_ops = { static struct iommu_table_ops pnv_ioda1_iommu_ops = {
.set = pnv_tce_build, .set = pnv_ioda1_tce_build,
.clear = pnv_tce_free, .clear = pnv_ioda1_tce_free,
.get = pnv_tce_get, .get = pnv_tce_get,
}; };
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe, static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
struct iommu_table *tbl, unsigned long index, unsigned long npages, bool rm)
__be64 *startp, __be64 *endp, bool rm)
{ {
struct pnv_ioda_pe *pe = tbl->data;
unsigned long start, end, inc; unsigned long start, end, inc;
__be64 __iomem *invalidate = rm ? __be64 __iomem *invalidate = rm ?
(__be64 __iomem *)pe->tce_inval_reg_phys : (__be64 __iomem *)pe->tce_inval_reg_phys :
...@@ -1748,10 +1772,8 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe, ...@@ -1748,10 +1772,8 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
end = start; end = start;
/* Figure out the start, end and step */ /* Figure out the start, end and step */
inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64)); start |= (index << shift);
start |= (inc << shift); end |= ((index + npages - 1) << shift);
inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
end |= (inc << shift);
inc = (0x1ull << shift); inc = (0x1ull << shift);
mb(); mb();
...@@ -1764,21 +1786,32 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe, ...@@ -1764,21 +1786,32 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
} }
} }
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
__be64 *startp, __be64 *endp, bool rm) long npages, unsigned long uaddr,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
struct pnv_ioda_pe *pe = tbl->data; int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
struct pnv_phb *phb = pe->phb; attrs);
if (phb->type == PNV_PHB_IODA1) if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm); pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
else
pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm); return ret;
}
static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
long npages)
{
pnv_tce_free(tbl, index, npages);
if (tbl->it_type & TCE_PCI_SWINV_FREE)
pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
} }
static struct iommu_table_ops pnv_ioda2_iommu_ops = { static struct iommu_table_ops pnv_ioda2_iommu_ops = {
.set = pnv_tce_build, .set = pnv_ioda2_tce_build,
.clear = pnv_tce_free, .clear = pnv_ioda2_tce_free,
.get = pnv_tce_get, .get = pnv_tce_get,
}; };
......
...@@ -577,37 +577,28 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages, ...@@ -577,37 +577,28 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
struct dma_attrs *attrs) struct dma_attrs *attrs)
{ {
u64 proto_tce = iommu_direction_to_tce_perm(direction); u64 proto_tce = iommu_direction_to_tce_perm(direction);
__be64 *tcep, *tces; __be64 *tcep;
u64 rpn; u64 rpn;
tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset; tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
rpn = __pa(uaddr) >> tbl->it_page_shift; rpn = __pa(uaddr) >> tbl->it_page_shift;
while (npages--) while (npages--)
*(tcep++) = cpu_to_be64(proto_tce | *(tcep++) = cpu_to_be64(proto_tce |
(rpn++ << tbl->it_page_shift)); (rpn++ << tbl->it_page_shift));
/* Some implementations won't cache invalid TCEs and thus may not
* need that flush. We'll probably turn it_type into a bit mask
* of flags if that becomes the case
*/
if (tbl->it_type & TCE_PCI_SWINV_CREATE)
pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, false);
return 0; return 0;
} }
void pnv_tce_free(struct iommu_table *tbl, long index, long npages) void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{ {
__be64 *tcep, *tces; __be64 *tcep;
tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset; tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
while (npages--) while (npages--)
*(tcep++) = cpu_to_be64(0); *(tcep++) = cpu_to_be64(0);
if (tbl->it_type & TCE_PCI_SWINV_FREE)
pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, false);
} }
unsigned long pnv_tce_get(struct iommu_table *tbl, long index) unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment