Commit 23e79425 authored by Gavin Shan, committed by Michael Ellerman

powerpc/powernv: Simplify pnv_ioda_setup_pe_seg()

pnv_ioda_setup_pe_seg() associates the IO and M32 segments with their
owner PE. The code that maps the segments should stay fixed, immune to
logic changes later made to pnv_ioda_setup_pe_seg().

This moves the segment-mapping code into a new helper,
pnv_ioda_setup_pe_res(). The data type of @rc is changed to "int64_t",
and the @hose argument is dropped from pnv_ioda_setup_pe_seg() since it
can be obtained from @pe. No functional changes are introduced.
Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Reviewed-By: Alistair Popple <alistair@popple.id.au>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 3fa23ff8
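
Before the diff, a rough, self-contained sketch of the call shape this change produces: a per-resource helper that validates and maps a single resource, plus a thin iterator that walks the PE's resources and delegates to it. This is an editor's illustration only; the toy_* types and toy_map_window() are made-up stand-ins, not the powernv structures or the OPAL call, which appear in the diff below.

/* Editor's sketch (not kernel code): the refactoring pattern applied here. */
#include <stdint.h>
#include <stdio.h>

struct toy_resource {
        uint64_t start, end;
        unsigned long flags;
};

struct toy_pe {
        int pe_number;
        int nr_res;
        struct toy_resource res[2];
};

/* Stand-in for the firmware mapping call; always succeeds here. */
static int64_t toy_map_window(int pe_number, uint64_t start, uint64_t end)
{
        printf("PE#%d: map [%#llx..%#llx]\n", pe_number,
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}

/* Per-resource helper: validate one resource, then map it (stubbed). */
static void toy_setup_pe_res(struct toy_pe *pe, struct toy_resource *res)
{
        int64_t rc;     /* wide return type, mirroring the @rc change to int64_t */

        if (!res || !res->flags || res->start > res->end)
                return; /* was a "continue" when this check lived inside the loop */

        rc = toy_map_window(pe->pe_number, res->start, res->end);
        if (rc != 0)
                fprintf(stderr, "error %lld mapping resource for PE#%d\n",
                        (long long)rc, pe->pe_number);
}

/* Iterator: walks the PE's resources and delegates to the helper. */
static void toy_setup_pe_seg(struct toy_pe *pe)
{
        int i;

        for (i = 0; i < pe->nr_res; i++)
                toy_setup_pe_res(pe, &pe->res[i]);
}

int main(void)
{
        struct toy_pe pe = {
                .pe_number = 1,
                .nr_res = 2,
                .res = { { 0x1000, 0x1fff, 1 }, { 0, 0, 0 } },
        };

        toy_setup_pe_seg(&pe);
        return 0;
}

With the bus-controller argument gone, the helper needs only the PE and the resource, the same reduction in coupling the real patch gets by reading the PHB via pe->phb.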
@@ -2929,31 +2929,16 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
 }
 #endif /* CONFIG_PCI_IOV */
 
-/*
- * This function is supposed to be called on basis of PE from top
- * to bottom style. So the the I/O or MMIO segment assigned to
- * parent PE could be overrided by its child PEs if necessary.
- */
-static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
-                                  struct pnv_ioda_pe *pe)
+static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
+                                  struct resource *res)
 {
-        struct pnv_phb *phb = hose->private_data;
+        struct pnv_phb *phb = pe->phb;
         struct pci_bus_region region;
-        struct resource *res;
-        int i, index;
-        int rc;
+        int index;
+        int64_t rc;
 
-        /*
-         * NOTE: We only care PCI bus based PE for now. For PCI
-         * device based PE, for example SRIOV sensitive VF should
-         * be figured out later.
-         */
-        BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
-
-        pci_bus_for_each_resource(pe->pbus, res, i) {
-                if (!res || !res->flags ||
-                    res->start > res->end)
-                        continue;
+        if (!res || !res->flags || res->start > res->end)
+                return;
 
                 if (res->flags & IORESOURCE_IO) {
                         region.start = res->start - phb->ioda.io_pci_base;
@@ -2966,8 +2951,7 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
                                 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
                                                 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
                                 if (rc != OPAL_SUCCESS) {
-                                        pr_err("%s: OPAL error %d when mapping IO "
-                                               "segment #%d to PE#%d\n",
+                                        pr_err("%s: Error %lld mapping IO segment#%d to PE#%d\n",
                                                 __func__, rc, index, pe->pe_number);
                                         break;
                                 }
@@ -2978,10 +2962,10 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
                 } else if ((res->flags & IORESOURCE_MEM) &&
                            !pnv_pci_is_mem_pref_64(res->flags)) {
                         region.start = res->start -
-                                       hose->mem_offset[0] -
+                                       phb->hose->mem_offset[0] -
                                        phb->ioda.m32_pci_base;
                         region.end = res->end -
-                                     hose->mem_offset[0] -
+                                     phb->hose->mem_offset[0] -
                                      phb->ioda.m32_pci_base;
                         index = region.start / phb->ioda.m32_segsize;
 
@@ -2991,8 +2975,7 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
                                 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
                                                 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
                                 if (rc != OPAL_SUCCESS) {
-                                        pr_err("%s: OPAL error %d when mapping M32 "
-                                               "segment#%d to PE#%d",
+                                        pr_err("%s: Error %lld mapping M32 segment#%d to PE#%d",
                                                 __func__, rc, index, pe->pe_number);
                                         break;
                                 }
@@ -3001,7 +2984,27 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
                                 index++;
                         }
                 }
-        }
 }
 
+/*
+ * This function is supposed to be called on basis of PE from top
+ * to bottom style. So the the I/O or MMIO segment assigned to
+ * parent PE could be overrided by its child PEs if necessary.
+ */
+static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
+{
+        struct resource *res;
+        int i;
+
+        /*
+         * NOTE: We only care PCI bus based PE for now. For PCI
+         * device based PE, for example SRIOV sensitive VF should
+         * be figured out later.
+         */
+        BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
+
+        pci_bus_for_each_resource(pe->pbus, res, i)
+                pnv_ioda_setup_pe_res(pe, res);
+}
+
 static void pnv_pci_ioda_setup_seg(void)
@@ -3018,7 +3021,7 @@ static void pnv_pci_ioda_setup_seg(void)
                         continue;
 
                 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
-                        pnv_ioda_setup_pe_seg(hose, pe);
+                        pnv_ioda_setup_pe_seg(pe);
                 }
         }
 }
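
For readers following the M32 arithmetic in the hunks above: the resource is first translated into window-relative addresses (subtracting hose->mem_offset[0] and phb->ioda.m32_pci_base), the starting segment index is the relative start divided by m32_segsize, and every segment the region covers is then mapped to the PE as the index advances. The sketch below walks through that arithmetic with made-up example values; the base, offset and segment size are hypothetical, and the real loop additionally bounds the index by the number of PEs and calls opal_pci_map_pe_mmio_window() per segment.

/* Editor's sketch: M32 segment index arithmetic with invented example values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t res_start  = 0x81000000;       /* hypothetical BAR start */
        uint64_t res_end    = 0x811fffff;       /* hypothetical BAR end */
        uint64_t mem_offset = 0x00000000;       /* stands in for hose->mem_offset[0] */
        uint64_t m32_base   = 0x80000000;       /* stands in for phb->ioda.m32_pci_base */
        uint64_t segsize    = 0x00100000;       /* hypothetical 1MB M32 segment size */

        /* Translate into window-relative addresses, as the code above does. */
        uint64_t start = res_start - mem_offset - m32_base;
        uint64_t end   = res_end   - mem_offset - m32_base;
        int index      = (int)(start / segsize);

        /* Map every segment the region covers; prints segments 16 and 17 here. */
        while (start <= end) {
                printf("map M32 segment#%d to the PE\n", index);
                start += segsize;
                index++;
        }
        return 0;
}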