Commit 1eb196c3 authored by Linus Torvalds

Merge tag 'ntb-4.4' of git://github.com/jonmason/ntb

Pull NTB bugfixes from Jon Mason:
 "NTB Bug fixes for potential NULL pointer accesses, accesses of a freed
  pointer, invalid buffer pointer, and a compiler warning.

  Also, unification of upstream/downstream addresses"

* tag 'ntb-4.4' of git://github.com/jonmason/ntb:
  NTB: fix 32-bit compiler warning
  NTB: unify translation addresses
  NTB: invalid buf pointer in multi-MW setups
  NTB: remove unused variable
  NTB: fix access of free-ed pointer
  NTB: Fix issue where we may be accessing NULL ptr
parents 041c7951 fdcb4b2e
...@@ -2204,17 +2204,17 @@ static const struct intel_ntb_xlat_reg xeon_sec_xlat = { ...@@ -2204,17 +2204,17 @@ static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
}; };
static struct intel_b2b_addr xeon_b2b_usd_addr = { static struct intel_b2b_addr xeon_b2b_usd_addr = {
.bar2_addr64 = XEON_B2B_BAR2_USD_ADDR64, .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
.bar4_addr64 = XEON_B2B_BAR4_USD_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
.bar4_addr32 = XEON_B2B_BAR4_USD_ADDR32, .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
.bar5_addr32 = XEON_B2B_BAR5_USD_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
}; };
static struct intel_b2b_addr xeon_b2b_dsd_addr = { static struct intel_b2b_addr xeon_b2b_dsd_addr = {
.bar2_addr64 = XEON_B2B_BAR2_DSD_ADDR64, .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
.bar4_addr64 = XEON_B2B_BAR4_DSD_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
.bar4_addr32 = XEON_B2B_BAR4_DSD_ADDR32, .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
.bar5_addr32 = XEON_B2B_BAR5_DSD_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
}; };
/* operations for primary side of local ntb */ /* operations for primary side of local ntb */
......
...@@ -227,16 +227,11 @@ ...@@ -227,16 +227,11 @@
/* Use the following addresses for translation between b2b ntb devices in case
 * the hardware default values are not reliable. */
#define XEON_B2B_BAR0_ADDR	0x1000000000000000ull
#define XEON_B2B_BAR2_ADDR64	0x2000000000000000ull
#define XEON_B2B_BAR4_ADDR64	0x4000000000000000ull
#define XEON_B2B_BAR4_ADDR32	0x20000000u
#define XEON_B2B_BAR5_ADDR32	0x40000000u

/* The peer ntb secondary config space is 32KB fixed size */
#define XEON_B2B_MIN_SIZE		0x8000
......
...@@ -605,7 +605,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, ...@@ -605,7 +605,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
num_qps_mw = qp_count / mw_count; num_qps_mw = qp_count / mw_count;
rx_size = (unsigned int)mw->xlat_size / num_qps_mw; rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count; qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
rx_size -= sizeof(struct ntb_rx_info); rx_size -= sizeof(struct ntb_rx_info);
qp->remote_rx_info = qp->rx_buff + rx_size; qp->remote_rx_info = qp->rx_buff + rx_size;
...@@ -825,10 +825,10 @@ static void ntb_transport_link_work(struct work_struct *work) ...@@ -825,10 +825,10 @@ static void ntb_transport_link_work(struct work_struct *work)
size = max_mw_size; size = max_mw_size;
spad = MW0_SZ_HIGH + (i * 2); spad = MW0_SZ_HIGH + (i * 2);
ntb_peer_spad_write(ndev, spad, (u32)(size >> 32)); ntb_peer_spad_write(ndev, spad, upper_32_bits(size));
spad = MW0_SZ_LOW + (i * 2); spad = MW0_SZ_LOW + (i * 2);
ntb_peer_spad_write(ndev, spad, (u32)size); ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
} }
ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count); ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);
...@@ -928,7 +928,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, ...@@ -928,7 +928,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
unsigned int qp_num) unsigned int qp_num)
{ {
struct ntb_transport_qp *qp; struct ntb_transport_qp *qp;
struct ntb_transport_mw *mw;
phys_addr_t mw_base; phys_addr_t mw_base;
resource_size_t mw_size; resource_size_t mw_size;
unsigned int num_qps_mw, tx_size; unsigned int num_qps_mw, tx_size;
...@@ -939,7 +938,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, ...@@ -939,7 +938,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp_count = nt->qp_count; qp_count = nt->qp_count;
mw_num = QP_TO_MW(nt, qp_num); mw_num = QP_TO_MW(nt, qp_num);
mw = &nt->mw_vec[mw_num];
qp = &nt->qp_vec[qp_num]; qp = &nt->qp_vec[qp_num];
qp->qp_num = qp_num; qp->qp_num = qp_num;
...@@ -958,7 +956,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, ...@@ -958,7 +956,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
mw_size = nt->mw_vec[mw_num].phys_size; mw_size = nt->mw_vec[mw_num].phys_size;
tx_size = (unsigned int)mw_size / num_qps_mw; tx_size = (unsigned int)mw_size / num_qps_mw;
qp_offset = tx_size * qp_num / mw_count; qp_offset = tx_size * (qp_num / mw_count);
qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
if (!qp->tx_mw) if (!qp->tx_mw)
...@@ -1080,7 +1078,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -1080,7 +1078,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
GFP_KERNEL, node); GFP_KERNEL, node);
if (!nt->qp_vec) { if (!nt->qp_vec) {
rc = -ENOMEM; rc = -ENOMEM;
goto err2; goto err1;
} }
if (nt_debugfs_dir) { if (nt_debugfs_dir) {
...@@ -1092,7 +1090,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -1092,7 +1090,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
for (i = 0; i < qp_count; i++) { for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i); rc = ntb_transport_init_queue(nt, i);
if (rc) if (rc)
goto err3; goto err2;
} }
INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
...@@ -1100,12 +1098,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -1100,12 +1098,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops); rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
if (rc) if (rc)
goto err3; goto err2;
INIT_LIST_HEAD(&nt->client_devs); INIT_LIST_HEAD(&nt->client_devs);
rc = ntb_bus_init(nt); rc = ntb_bus_init(nt);
if (rc) if (rc)
goto err4; goto err3;
nt->link_is_up = false; nt->link_is_up = false;
ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
...@@ -1113,17 +1111,16 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) ...@@ -1113,17 +1111,16 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
return 0; return 0;
err4:
ntb_clear_ctx(ndev);
err3: err3:
kfree(nt->qp_vec); ntb_clear_ctx(ndev);
err2: err2:
kfree(nt->mw_vec); kfree(nt->qp_vec);
err1: err1:
while (i--) { while (i--) {
mw = &nt->mw_vec[i]; mw = &nt->mw_vec[i];
iounmap(mw->vbase); iounmap(mw->vbase);
} }
kfree(nt->mw_vec);
err: err:
kfree(nt); kfree(nt);
return rc; return rc;
...@@ -1931,13 +1928,11 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up); ...@@ -1931,13 +1928,11 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
*/ */
void ntb_transport_link_down(struct ntb_transport_qp *qp) void ntb_transport_link_down(struct ntb_transport_qp *qp)
{ {
struct pci_dev *pdev;
int val; int val;
if (!qp) if (!qp)
return; return;
pdev = qp->ndev->pdev;
qp->client_ready = false; qp->client_ready = false;
val = ntb_spad_read(qp->ndev, QP_LINKS); val = ntb_spad_read(qp->ndev, QP_LINKS);
...@@ -1996,23 +1991,24 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num); ...@@ -1996,23 +1991,24 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
*/ */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{ {
unsigned int max; unsigned int max_size;
unsigned int copy_align; unsigned int copy_align;
struct dma_chan *rx_chan, *tx_chan;
if (!qp) if (!qp)
return 0; return 0;
if (!qp->tx_dma_chan && !qp->rx_dma_chan) rx_chan = qp->rx_dma_chan;
return qp->tx_max_frame - sizeof(struct ntb_payload_header); tx_chan = qp->tx_dma_chan;
copy_align = max(qp->tx_dma_chan->device->copy_align, copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
qp->rx_dma_chan->device->copy_align); tx_chan ? tx_chan->device->copy_align : 0);
/* If DMA engine usage is possible, try to find the max size for that */ /* If DMA engine usage is possible, try to find the max size for that */
max = qp->tx_max_frame - sizeof(struct ntb_payload_header); max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
max -= max % (1 << copy_align); max_size = round_down(max_size, 1 << copy_align);
return max; return max_size;
} }
EXPORT_SYMBOL_GPL(ntb_transport_max_size); EXPORT_SYMBOL_GPL(ntb_transport_max_size);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment