Commit 48ea0218 authored by Logan Gunthorpe's avatar Logan Gunthorpe Committed by Jon Mason

ntb_hw_intel: Style fixes: open code macros that just obfuscate code

As per comments in [1] by Greg Kroah-Hartman, the ndev_* macros should
be cleaned up. This makes it more clear what's actually going on when
reading the code.

[1] http://www.spinics.net/lists/linux-pci/msg56904.html

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
parent 0f9bfb97
...@@ -272,12 +272,12 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev, ...@@ -272,12 +272,12 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
if (db_addr) { if (db_addr) {
*db_addr = reg_addr + reg; *db_addr = reg_addr + reg;
dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr); dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
} }
if (db_size) { if (db_size) {
*db_size = ndev->reg->db_size; *db_size = ndev->reg->db_size;
dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size); dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
} }
return 0; return 0;
...@@ -370,7 +370,8 @@ static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx, ...@@ -370,7 +370,8 @@ static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
if (spad_addr) { if (spad_addr) {
*spad_addr = reg_addr + reg + (idx << 2); *spad_addr = reg_addr + reg + (idx << 2);
dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr); dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
*spad_addr);
} }
return 0; return 0;
...@@ -411,7 +412,7 @@ static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec) ...@@ -411,7 +412,7 @@ static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31)) if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
vec_mask |= ndev->db_link_mask; vec_mask |= ndev->db_link_mask;
dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask); dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);
ndev->last_ts = jiffies; ndev->last_ts = jiffies;
...@@ -430,7 +431,7 @@ static irqreturn_t ndev_vec_isr(int irq, void *dev) ...@@ -430,7 +431,7 @@ static irqreturn_t ndev_vec_isr(int irq, void *dev)
{ {
struct intel_ntb_vec *nvec = dev; struct intel_ntb_vec *nvec = dev;
dev_dbg(ndev_dev(nvec->ndev), "irq: %d nvec->num: %d\n", dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
irq, nvec->num); irq, nvec->num);
return ndev_interrupt(nvec->ndev, nvec->num); return ndev_interrupt(nvec->ndev, nvec->num);
...@@ -440,7 +441,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev) ...@@ -440,7 +441,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev)
{ {
struct intel_ntb_dev *ndev = dev; struct intel_ntb_dev *ndev = dev;
return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq); return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
} }
static int ndev_init_isr(struct intel_ntb_dev *ndev, static int ndev_init_isr(struct intel_ntb_dev *ndev,
...@@ -450,7 +451,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev, ...@@ -450,7 +451,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
struct pci_dev *pdev; struct pci_dev *pdev;
int rc, i, msix_count, node; int rc, i, msix_count, node;
pdev = ndev_pdev(ndev); pdev = ndev->ntb.pdev;
node = dev_to_node(&pdev->dev); node = dev_to_node(&pdev->dev);
...@@ -489,7 +490,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev, ...@@ -489,7 +490,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
goto err_msix_request; goto err_msix_request;
} }
dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count); dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
ndev->db_vec_count = msix_count; ndev->db_vec_count = msix_count;
ndev->db_vec_shift = msix_shift; ndev->db_vec_shift = msix_shift;
return 0; return 0;
...@@ -517,7 +518,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev, ...@@ -517,7 +518,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
if (rc) if (rc)
goto err_msi_request; goto err_msi_request;
dev_dbg(ndev_dev(ndev), "Using msi interrupts\n"); dev_dbg(&pdev->dev, "Using msi interrupts\n");
ndev->db_vec_count = 1; ndev->db_vec_count = 1;
ndev->db_vec_shift = total_shift; ndev->db_vec_shift = total_shift;
return 0; return 0;
...@@ -535,7 +536,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev, ...@@ -535,7 +536,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
if (rc) if (rc)
goto err_intx_request; goto err_intx_request;
dev_dbg(ndev_dev(ndev), "Using intx interrupts\n"); dev_dbg(&pdev->dev, "Using intx interrupts\n");
ndev->db_vec_count = 1; ndev->db_vec_count = 1;
ndev->db_vec_shift = total_shift; ndev->db_vec_shift = total_shift;
return 0; return 0;
...@@ -549,7 +550,7 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev) ...@@ -549,7 +550,7 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
struct pci_dev *pdev; struct pci_dev *pdev;
int i; int i;
pdev = ndev_pdev(ndev); pdev = ndev->ntb.pdev;
/* Mask all doorbell interrupts */ /* Mask all doorbell interrupts */
ndev->db_mask = ndev->db_valid_mask; ndev->db_mask = ndev->db_valid_mask;
...@@ -746,7 +747,7 @@ static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf, ...@@ -746,7 +747,7 @@ static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
union { u64 v64; u32 v32; u16 v16; u8 v8; } u; union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
ndev = filp->private_data; ndev = filp->private_data;
pdev = ndev_pdev(ndev); pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio; mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul); buf_size = min(count, 0x800ul);
...@@ -1021,7 +1022,8 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev) ...@@ -1021,7 +1022,8 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
ndev->debugfs_info = NULL; ndev->debugfs_info = NULL;
} else { } else {
ndev->debugfs_dir = ndev->debugfs_dir =
debugfs_create_dir(ndev_name(ndev), debugfs_dir); debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
if (!ndev->debugfs_dir) if (!ndev->debugfs_dir)
ndev->debugfs_info = NULL; ndev->debugfs_info = NULL;
else else
...@@ -1219,13 +1221,13 @@ static int intel_ntb_link_enable(struct ntb_dev *ntb, ...@@ -1219,13 +1221,13 @@ static int intel_ntb_link_enable(struct ntb_dev *ntb,
if (ndev->ntb.topo == NTB_TOPO_SEC) if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL; return -EINVAL;
dev_dbg(ndev_dev(ndev), dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n", "Enabling link with max_speed %d max_width %d\n",
max_speed, max_width); max_speed, max_width);
if (max_speed != NTB_SPEED_AUTO) if (max_speed != NTB_SPEED_AUTO)
dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed); dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
if (max_width != NTB_WIDTH_AUTO) if (max_width != NTB_WIDTH_AUTO)
dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width); dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
...@@ -1248,7 +1250,7 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb) ...@@ -1248,7 +1250,7 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb)
if (ndev->ntb.topo == NTB_TOPO_SEC) if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL; return -EINVAL;
dev_dbg(ndev_dev(ndev), "Disabling link\n"); dev_dbg(&ntb->pdev->dev, "Disabling link\n");
/* Bring NTB link down */ /* Bring NTB link down */
ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
...@@ -1485,30 +1487,33 @@ static int atom_link_is_err(struct intel_ntb_dev *ndev) ...@@ -1485,30 +1487,33 @@ static int atom_link_is_err(struct intel_ntb_dev *ndev)
static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{ {
struct device *dev = &ndev->ntb.pdev->dev;
switch (ppd & ATOM_PPD_TOPO_MASK) { switch (ppd & ATOM_PPD_TOPO_MASK) {
case ATOM_PPD_TOPO_B2B_USD: case ATOM_PPD_TOPO_B2B_USD:
dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd); dev_dbg(dev, "PPD %d B2B USD\n", ppd);
return NTB_TOPO_B2B_USD; return NTB_TOPO_B2B_USD;
case ATOM_PPD_TOPO_B2B_DSD: case ATOM_PPD_TOPO_B2B_DSD:
dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd); dev_dbg(dev, "PPD %d B2B DSD\n", ppd);
return NTB_TOPO_B2B_DSD; return NTB_TOPO_B2B_DSD;
case ATOM_PPD_TOPO_PRI_USD: case ATOM_PPD_TOPO_PRI_USD:
case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */ case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
case ATOM_PPD_TOPO_SEC_USD: case ATOM_PPD_TOPO_SEC_USD:
case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */ case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd); dev_dbg(dev, "PPD %d non B2B disabled\n", ppd);
return NTB_TOPO_NONE; return NTB_TOPO_NONE;
} }
dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd); dev_dbg(dev, "PPD %d invalid\n", ppd);
return NTB_TOPO_NONE; return NTB_TOPO_NONE;
} }
static void atom_link_hb(struct work_struct *work) static void atom_link_hb(struct work_struct *work)
{ {
struct intel_ntb_dev *ndev = hb_ndev(work); struct intel_ntb_dev *ndev = hb_ndev(work);
struct device *dev = &ndev->ntb.pdev->dev;
unsigned long poll_ts; unsigned long poll_ts;
void __iomem *mmio; void __iomem *mmio;
u32 status32; u32 status32;
...@@ -1546,30 +1551,30 @@ static void atom_link_hb(struct work_struct *work) ...@@ -1546,30 +1551,30 @@ static void atom_link_hb(struct work_struct *work)
/* Clear AER Errors, write to clear */ /* Clear AER Errors, write to clear */
status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET); status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32); dev_dbg(dev, "ERRCORSTS = %x\n", status32);
status32 &= PCI_ERR_COR_REP_ROLL; status32 &= PCI_ERR_COR_REP_ROLL;
iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET); iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
/* Clear unexpected electrical idle event in LTSSM, write to clear */ /* Clear unexpected electrical idle event in LTSSM, write to clear */
status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET); status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32); dev_dbg(dev, "LTSSMERRSTS0 = %x\n", status32);
status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI; status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET); iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
/* Clear DeSkew Buffer error, write to clear */ /* Clear DeSkew Buffer error, write to clear */
status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET); status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32); dev_dbg(dev, "DESKEWSTS = %x\n", status32);
status32 |= ATOM_DESKEWSTS_DBERR; status32 |= ATOM_DESKEWSTS_DBERR;
iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET); iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET); status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32); dev_dbg(dev, "IBSTERRRCRVSTS0 = %x\n", status32);
status32 &= ATOM_IBIST_ERR_OFLOW; status32 &= ATOM_IBIST_ERR_OFLOW;
iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET); iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
/* Releases the NTB state machine to allow the link to retrain */ /* Releases the NTB state machine to allow the link to retrain */
status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET); status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32); dev_dbg(dev, "LTSSMSTATEJMP = %x\n", status32);
status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT; status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET); iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
...@@ -1742,11 +1747,11 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -1742,11 +1747,11 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
int b2b_bar; int b2b_bar;
u8 bar_sz; u8 bar_sz;
pdev = ndev_pdev(ndev); pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio; mmio = ndev->self_mmio;
if (ndev->b2b_idx == UINT_MAX) { if (ndev->b2b_idx == UINT_MAX) {
dev_dbg(ndev_dev(ndev), "not using b2b mw\n"); dev_dbg(&pdev->dev, "not using b2b mw\n");
b2b_bar = 0; b2b_bar = 0;
ndev->b2b_off = 0; ndev->b2b_off = 0;
} else { } else {
...@@ -1754,24 +1759,21 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -1754,24 +1759,21 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
if (b2b_bar < 0) if (b2b_bar < 0)
return -EIO; return -EIO;
dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar); dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size); dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) { if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
dev_dbg(ndev_dev(ndev), dev_dbg(&pdev->dev, "b2b using first half of bar\n");
"b2b using first half of bar\n");
ndev->b2b_off = bar_size >> 1; ndev->b2b_off = bar_size >> 1;
} else if (bar_size >= XEON_B2B_MIN_SIZE) { } else if (bar_size >= XEON_B2B_MIN_SIZE) {
dev_dbg(ndev_dev(ndev), dev_dbg(&pdev->dev, "b2b using whole bar\n");
"b2b using whole bar\n");
ndev->b2b_off = 0; ndev->b2b_off = 0;
--ndev->mw_count; --ndev->mw_count;
} else { } else {
dev_dbg(ndev_dev(ndev), dev_dbg(&pdev->dev, "b2b bar size is too small\n");
"b2b bar size is too small\n");
return -EIO; return -EIO;
} }
} }
...@@ -1781,7 +1783,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -1781,7 +1783,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
* except disable or halve the size of the b2b secondary bar. * except disable or halve the size of the b2b secondary bar.
*/ */
pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
if (b2b_bar == 1) { if (b2b_bar == 1) {
if (ndev->b2b_off) if (ndev->b2b_off)
bar_sz -= 1; bar_sz -= 1;
...@@ -1791,10 +1793,10 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -1791,10 +1793,10 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz); pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
if (b2b_bar == 2) { if (b2b_bar == 2) {
if (ndev->b2b_off) if (ndev->b2b_off)
bar_sz -= 1; bar_sz -= 1;
...@@ -1804,7 +1806,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -1804,7 +1806,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz); pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
/* SBAR01 hit by first part of the b2b bar */ /* SBAR01 hit by first part of the b2b bar */
if (b2b_bar == 0) if (b2b_bar == 0)
...@@ -1820,12 +1822,12 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -1820,12 +1822,12 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
/* zero incoming translation addrs */ /* zero incoming translation addrs */
iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET); iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
...@@ -1895,7 +1897,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev) ...@@ -1895,7 +1897,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev)
u8 ppd; u8 ppd;
int rc; int rc;
pdev = ndev_pdev(ndev); pdev = ndev->ntb.pdev;
ndev->reg = &skx_reg; ndev->reg = &skx_reg;
...@@ -1904,7 +1906,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev) ...@@ -1904,7 +1906,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev)
return -EIO; return -EIO;
ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd, dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
ntb_topo_string(ndev->ntb.topo)); ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE) if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL; return -EINVAL;
...@@ -1928,14 +1930,14 @@ static int intel_ntb3_link_enable(struct ntb_dev *ntb, ...@@ -1928,14 +1930,14 @@ static int intel_ntb3_link_enable(struct ntb_dev *ntb,
ndev = container_of(ntb, struct intel_ntb_dev, ntb); ndev = container_of(ntb, struct intel_ntb_dev, ntb);
dev_dbg(ndev_dev(ndev), dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n", "Enabling link with max_speed %d max_width %d\n",
max_speed, max_width); max_speed, max_width);
if (max_speed != NTB_SPEED_AUTO) if (max_speed != NTB_SPEED_AUTO)
dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed); dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
if (max_width != NTB_WIDTH_AUTO) if (max_width != NTB_WIDTH_AUTO)
dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width); dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
...@@ -1999,7 +2001,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, ...@@ -1999,7 +2001,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EIO; return -EIO;
} }
dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val); dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
/* set and verify setting the limit */ /* set and verify setting the limit */
iowrite64(limit, mmio + limit_reg); iowrite64(limit, mmio + limit_reg);
...@@ -2010,7 +2012,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, ...@@ -2010,7 +2012,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EIO; return -EIO;
} }
dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val); dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
/* setup the EP */ /* setup the EP */
limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000; limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
...@@ -2031,7 +2033,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, ...@@ -2031,7 +2033,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EIO; return -EIO;
} }
dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val); dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
return 0; return 0;
} }
...@@ -2138,7 +2140,7 @@ static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) ...@@ -2138,7 +2140,7 @@ static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd) static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{ {
if (ppd & XEON_PPD_SPLIT_BAR_MASK) { if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd); dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
return 1; return 1;
} }
return 0; return 0;
...@@ -2168,11 +2170,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2168,11 +2170,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
int b2b_bar; int b2b_bar;
u8 bar_sz; u8 bar_sz;
pdev = ndev_pdev(ndev); pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio; mmio = ndev->self_mmio;
if (ndev->b2b_idx == UINT_MAX) { if (ndev->b2b_idx == UINT_MAX) {
dev_dbg(ndev_dev(ndev), "not using b2b mw\n"); dev_dbg(&pdev->dev, "not using b2b mw\n");
b2b_bar = 0; b2b_bar = 0;
ndev->b2b_off = 0; ndev->b2b_off = 0;
} else { } else {
...@@ -2180,24 +2182,21 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2180,24 +2182,21 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
if (b2b_bar < 0) if (b2b_bar < 0)
return -EIO; return -EIO;
dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar); dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size); dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) { if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
dev_dbg(ndev_dev(ndev), dev_dbg(&pdev->dev, "b2b using first half of bar\n");
"b2b using first half of bar\n");
ndev->b2b_off = bar_size >> 1; ndev->b2b_off = bar_size >> 1;
} else if (XEON_B2B_MIN_SIZE <= bar_size) { } else if (XEON_B2B_MIN_SIZE <= bar_size) {
dev_dbg(ndev_dev(ndev), dev_dbg(&pdev->dev, "b2b using whole bar\n");
"b2b using whole bar\n");
ndev->b2b_off = 0; ndev->b2b_off = 0;
--ndev->mw_count; --ndev->mw_count;
} else { } else {
dev_dbg(ndev_dev(ndev), dev_dbg(&pdev->dev, "b2b bar size is too small\n");
"b2b bar size is too small\n");
return -EIO; return -EIO;
} }
} }
...@@ -2209,7 +2208,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2209,7 +2208,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
* offsets are not in a consistent order (bar5sz comes after ppd, odd). * offsets are not in a consistent order (bar5sz comes after ppd, odd).
*/ */
pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
if (b2b_bar == 2) { if (b2b_bar == 2) {
if (ndev->b2b_off) if (ndev->b2b_off)
bar_sz -= 1; bar_sz -= 1;
...@@ -2218,11 +2217,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2218,11 +2217,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
} }
pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz); pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);
if (!ndev->bar4_split) { if (!ndev->bar4_split) {
pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
if (b2b_bar == 4) { if (b2b_bar == 4) {
if (ndev->b2b_off) if (ndev->b2b_off)
bar_sz -= 1; bar_sz -= 1;
...@@ -2231,10 +2230,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2231,10 +2230,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
} }
pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz); pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
} else { } else {
pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
if (b2b_bar == 4) { if (b2b_bar == 4) {
if (ndev->b2b_off) if (ndev->b2b_off)
bar_sz -= 1; bar_sz -= 1;
...@@ -2243,10 +2242,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2243,10 +2242,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
} }
pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz); pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);
pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
if (b2b_bar == 5) { if (b2b_bar == 5) {
if (ndev->b2b_off) if (ndev->b2b_off)
bar_sz -= 1; bar_sz -= 1;
...@@ -2255,7 +2254,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2255,7 +2254,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
} }
pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz); pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz); pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz); dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
} }
/* SBAR01 hit by first part of the b2b bar */ /* SBAR01 hit by first part of the b2b bar */
...@@ -2272,7 +2271,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2272,7 +2271,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
else else
return -EIO; return -EIO;
dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET); iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar. /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
...@@ -2283,26 +2282,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2283,26 +2282,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET); iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);
if (!ndev->bar4_split) { if (!ndev->bar4_split) {
bar_addr = addr->bar4_addr64 + bar_addr = addr->bar4_addr64 +
(b2b_bar == 4 ? ndev->b2b_off : 0); (b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET); iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
} else { } else {
bar_addr = addr->bar4_addr32 + bar_addr = addr->bar4_addr32 +
(b2b_bar == 4 ? ndev->b2b_off : 0); (b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET); iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);
bar_addr = addr->bar5_addr32 + bar_addr = addr->bar5_addr32 +
(b2b_bar == 5 ? ndev->b2b_off : 0); (b2b_bar == 5 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET); iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
} }
/* setup incoming bar limits == base addrs (zero length windows) */ /* setup incoming bar limits == base addrs (zero length windows) */
...@@ -2310,26 +2309,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2310,26 +2309,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET); iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);
if (!ndev->bar4_split) { if (!ndev->bar4_split) {
bar_addr = addr->bar4_addr64 + bar_addr = addr->bar4_addr64 +
(b2b_bar == 4 ? ndev->b2b_off : 0); (b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET); iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
} else { } else {
bar_addr = addr->bar4_addr32 + bar_addr = addr->bar4_addr32 +
(b2b_bar == 4 ? ndev->b2b_off : 0); (b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET); iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);
bar_addr = addr->bar5_addr32 + bar_addr = addr->bar5_addr32 +
(b2b_bar == 5 ? ndev->b2b_off : 0); (b2b_bar == 5 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET); iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr); dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
} }
/* zero incoming translation addrs */ /* zero incoming translation addrs */
...@@ -2355,23 +2354,23 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2355,23 +2354,23 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
bar_addr = peer_addr->bar2_addr64; bar_addr = peer_addr->bar2_addr64;
iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET); iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET); bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);
if (!ndev->bar4_split) { if (!ndev->bar4_split) {
bar_addr = peer_addr->bar4_addr64; bar_addr = peer_addr->bar4_addr64;
iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET); iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET); bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
} else { } else {
bar_addr = peer_addr->bar4_addr32; bar_addr = peer_addr->bar4_addr32;
iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET); iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET); bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr); dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);
bar_addr = peer_addr->bar5_addr32; bar_addr = peer_addr->bar5_addr32;
iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET); iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET); bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr); dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
} }
/* set the translation offset for b2b registers */ /* set the translation offset for b2b registers */
...@@ -2389,7 +2388,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2389,7 +2388,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
return -EIO; return -EIO;
/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */ /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr); dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL); iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU); iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
...@@ -2408,6 +2407,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, ...@@ -2408,6 +2407,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
static int xeon_init_ntb(struct intel_ntb_dev *ndev) static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{ {
struct device *dev = &ndev->ntb.pdev->dev;
int rc; int rc;
u32 ntb_ctl; u32 ntb_ctl;
...@@ -2423,7 +2423,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev) ...@@ -2423,7 +2423,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
switch (ndev->ntb.topo) { switch (ndev->ntb.topo) {
case NTB_TOPO_PRI: case NTB_TOPO_PRI:
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
dev_err(ndev_dev(ndev), "NTB Primary config disabled\n"); dev_err(dev, "NTB Primary config disabled\n");
return -EINVAL; return -EINVAL;
} }
...@@ -2441,7 +2441,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev) ...@@ -2441,7 +2441,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
case NTB_TOPO_SEC: case NTB_TOPO_SEC:
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n"); dev_err(dev, "NTB Secondary config disabled\n");
return -EINVAL; return -EINVAL;
} }
/* use half the spads for the peer */ /* use half the spads for the peer */
...@@ -2466,18 +2466,17 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev) ...@@ -2466,18 +2466,17 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
ndev->b2b_idx = b2b_mw_idx; ndev->b2b_idx = b2b_mw_idx;
if (ndev->b2b_idx >= ndev->mw_count) { if (ndev->b2b_idx >= ndev->mw_count) {
dev_dbg(ndev_dev(ndev), dev_dbg(dev,
"b2b_mw_idx %d invalid for mw_count %u\n", "b2b_mw_idx %d invalid for mw_count %u\n",
b2b_mw_idx, ndev->mw_count); b2b_mw_idx, ndev->mw_count);
return -EINVAL; return -EINVAL;
} }
dev_dbg(ndev_dev(ndev), dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
"setting up b2b mw idx %d means %d\n",
b2b_mw_idx, ndev->b2b_idx); b2b_mw_idx, ndev->b2b_idx);
} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) { } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n"); dev_warn(dev, "Reduce doorbell count by 1\n");
ndev->db_count -= 1; ndev->db_count -= 1;
} }
...@@ -2518,7 +2517,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev) ...@@ -2518,7 +2517,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
u8 ppd; u8 ppd;
int rc, mem; int rc, mem;
pdev = ndev_pdev(ndev); pdev = ndev->ntb.pdev;
switch (pdev->device) { switch (pdev->device) {
/* There is a Xeon hardware errata related to writes to SDOORBELL or /* There is a Xeon hardware errata related to writes to SDOORBELL or
...@@ -2594,14 +2593,14 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev) ...@@ -2594,14 +2593,14 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
return -EIO; return -EIO;
ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd, dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
ntb_topo_string(ndev->ntb.topo)); ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE) if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL; return -EINVAL;
if (ndev->ntb.topo != NTB_TOPO_SEC) { if (ndev->ntb.topo != NTB_TOPO_SEC) {
ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd); ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n", dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
ppd, ndev->bar4_split); ppd, ndev->bar4_split);
} else { } else {
/* This is a way for transparent BAR to figure out if we are /* This is a way for transparent BAR to figure out if we are
...@@ -2611,7 +2610,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev) ...@@ -2611,7 +2610,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
mem = pci_select_bars(pdev, IORESOURCE_MEM); mem = pci_select_bars(pdev, IORESOURCE_MEM);
ndev->bar4_split = hweight32(mem) == ndev->bar4_split = hweight32(mem) ==
HSX_SPLIT_BAR_MW_COUNT + 1; HSX_SPLIT_BAR_MW_COUNT + 1;
dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n", dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
mem, ndev->bar4_split); mem, ndev->bar4_split);
} }
...@@ -2648,7 +2647,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) ...@@ -2648,7 +2647,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) if (rc)
goto err_dma_mask; goto err_dma_mask;
dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n"); dev_warn(&pdev->dev, "Cannot DMA highmem\n");
} }
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
...@@ -2656,7 +2655,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) ...@@ -2656,7 +2655,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) if (rc)
goto err_dma_mask; goto err_dma_mask;
dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n"); dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
} }
ndev->self_mmio = pci_iomap(pdev, 0, 0); ndev->self_mmio = pci_iomap(pdev, 0, 0);
...@@ -2682,7 +2681,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) ...@@ -2682,7 +2681,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev) static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{ {
struct pci_dev *pdev = ndev_pdev(ndev); struct pci_dev *pdev = ndev->ntb.pdev;
if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio) if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
pci_iounmap(pdev, ndev->peer_mmio); pci_iounmap(pdev, ndev->peer_mmio);
...@@ -3058,4 +3057,3 @@ static void __exit intel_ntb_pci_driver_exit(void) ...@@ -3058,4 +3057,3 @@ static void __exit intel_ntb_pci_driver_exit(void)
debugfs_remove_recursive(debugfs_dir); debugfs_remove_recursive(debugfs_dir);
} }
module_exit(intel_ntb_pci_driver_exit); module_exit(intel_ntb_pci_driver_exit);
...@@ -382,9 +382,6 @@ struct intel_ntb_dev { ...@@ -382,9 +382,6 @@ struct intel_ntb_dev {
struct dentry *debugfs_info; struct dentry *debugfs_info;
}; };
#define ndev_pdev(ndev) ((ndev)->ntb.pdev)
#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
#define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb) #define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb)
#define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \ #define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \
hb_timer.work) hb_timer.work)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment