Commit e8a71a38 authored by Linus Torvalds

Merge tag 'ntb-5.1' of git://github.com/jonmason/ntb

Pull NTB updates from Jon Mason:

 - fixes for switchtec debugability and mapping table entries

 - NTB transport improvements

 - a reworking of peer_db_addr() for better abstraction (see the usage sketch below)

* tag 'ntb-5.1' of git://github.com/jonmason/ntb:
  NTB: add new parameter to peer_db_addr() db_bit and db_data
  NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA
  NTB: ntb_transport: Free MWs in ntb_transport_link_cleanup()
  ntb_hw_switchtec: Added support of >=4G memory windows
  ntb_hw_switchtec: NT req id mapping table register entry number should be 512
  ntb_hw_switchtec: debug print 64bit aligned crosslink BAR Numbers
parents 2b9c272c ebb09b33
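
For context on the peer_db_addr() rework below: with the added db_bit/db_data parameters, a client can ask the hardware driver for both the peer doorbell register address and the exact value that rings one specific doorbell bit, so the write can be handed to a DMA engine instead of issued from the CPU. The following is a minimal, hypothetical client-side sketch (not part of this merge; example_ring_peer_db() and the CPU fallback are assumptions for illustration only):

/* Hypothetical sketch of the reworked ntb_peer_db_addr() API. */
static int example_ring_peer_db(struct ntb_dev *ntb, int db_bit)
{
        phys_addr_t db_addr;
        resource_size_t db_size;
        u64 db_data;
        int rc;

        /* Ask the hardware driver where, and with what value, doorbell
         * bit db_bit is rung on the peer; invalid bits return -EINVAL.
         */
        rc = ntb_peer_db_addr(ntb, &db_addr, &db_size, &db_data, db_bit);
        if (rc)
                return rc;

        /* db_addr/db_data would normally be appended to a DMA descriptor
         * chain as a trailing immediate write; as a CPU fallback the
         * existing peer_db_set() path still rings the raw bit.
         */
        return ntb_peer_db_set(ntb, BIT_ULL(db_bit));
}
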
@@ -180,7 +180,7 @@ int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
 	return ndev->reg->mw_bar[idx];
 }
 
-static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
+void ndev_db_addr(struct intel_ntb_dev *ndev,
 	       phys_addr_t *db_addr, resource_size_t *db_size,
 	       phys_addr_t reg_addr, unsigned long reg)
 {
@@ -196,8 +196,6 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
 		*db_size = ndev->reg->db_size;
 		dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
 	}
-
-	return 0;
 }
 
 u64 ndev_db_read(struct intel_ntb_dev *ndev,
@@ -1111,13 +1109,28 @@ int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
 			      ndev->self_reg->db_mask);
 }
 
-int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
-			   resource_size_t *db_size)
+static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+				  resource_size_t *db_size, u64 *db_data, int db_bit)
 {
+	u64 db_bits;
 	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
 
-	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
+	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+		return -EINVAL;
+
+	db_bits = BIT_ULL(db_bit);
+
+	if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask))
+		return -EINVAL;
+
+	ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
 			    ndev->peer_reg->db_bell);
+
+	if (db_data)
+		*db_data = db_bits;
+
+	return 0;
 }
 
 static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
...
@@ -147,6 +147,9 @@ extern struct intel_b2b_addr xeon_b2b_dsd_addr;
 int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max,
 		  int msix_shift, int total_shift);
 enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
+void ndev_db_addr(struct intel_ntb_dev *ndev,
+		  phys_addr_t *db_addr, resource_size_t *db_size,
+		  phys_addr_t reg_addr, unsigned long reg);
 u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio);
 int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
 		  void __iomem *mmio);
@@ -166,8 +169,6 @@ int intel_ntb_db_vector_count(struct ntb_dev *ntb);
 u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector);
 int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits);
 int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits);
-int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
-			   resource_size_t *db_size);
 int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb);
 int intel_ntb_spad_count(struct ntb_dev *ntb);
 u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx);
...
@@ -532,6 +532,37 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
 	return 0;
 }
 
+int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+				resource_size_t *db_size,
+				u64 *db_data, int db_bit)
+{
+	phys_addr_t db_addr_base;
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+		return -EINVAL;
+
+	if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask))
+		return -EINVAL;
+
+	ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr,
+				ndev->peer_reg->db_bell);
+
+	if (db_addr) {
+		*db_addr = db_addr_base + (db_bit * 4);
+		dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n",
+				*db_addr, db_bit);
+	}
+
+	if (db_data) {
+		*db_data = 1;
+		dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n",
+				*db_data, db_bit);
+	}
+
+	return 0;
+}
+
 static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
 {
 	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
@@ -584,7 +615,7 @@ const struct ntb_dev_ops intel_ntb3_ops = {
 	.db_clear = intel_ntb3_db_clear,
 	.db_set_mask = intel_ntb_db_set_mask,
 	.db_clear_mask = intel_ntb_db_clear_mask,
-	.peer_db_addr = intel_ntb_peer_db_addr,
+	.peer_db_addr = intel_ntb3_peer_db_addr,
 	.peer_db_set = intel_ntb3_peer_db_set,
 	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
 	.spad_count = intel_ntb_spad_count,
...
@@ -236,6 +236,7 @@ static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
 	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
 	iowrite32(0, &ctl->bar_entry[bar].win_size);
+	iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
 	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
 }
 
@@ -258,7 +259,9 @@ static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
 	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
 
 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
-	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
+	iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
+		  &ctl->bar_entry[bar].win_size);
+	iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
 	iowrite64(sndev->self_partition | addr,
 		  &ctl->bar_entry[bar].xlate_addr);
 }
@@ -679,11 +682,16 @@ static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
 
 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
 				      phys_addr_t *db_addr,
-				      resource_size_t *db_size)
+				      resource_size_t *db_size,
+				      u64 *db_data,
+				      int db_bit)
 {
 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
 	unsigned long offset;
 
+	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+		return -EINVAL;
+
 	offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
 		(unsigned long)sndev->stdev->mmio;
 
@@ -693,6 +701,8 @@ static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
 	if (db_size)
 		*db_size = sizeof(u32);
+	if (db_data)
+		*db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;
 
 	return 0;
 }
@@ -1025,7 +1035,9 @@ static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
 		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
 
 		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
-		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
+		iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
+			  &ctl->bar_entry[bar].win_size);
+		iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
 		iowrite64(sndev->peer_partition | addr,
 			  &ctl->bar_entry[bar].xlate_addr);
 	}
@@ -1092,7 +1104,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev,
 
 		dev_dbg(&sndev->stdev->dev,
 			"Crosslink BAR%d addr: %llx\n",
-			i, bar_addr);
+			i*2, bar_addr);
 
 		if (bar_addr != bar_space * i)
 			continue;
...
@@ -144,7 +144,9 @@ struct ntb_transport_qp {
 	struct list_head tx_free_q;
 	spinlock_t ntb_tx_free_q_lock;
 	void __iomem *tx_mw;
-	dma_addr_t tx_mw_phys;
+	phys_addr_t tx_mw_phys;
+	size_t tx_mw_size;
+	dma_addr_t tx_mw_dma_addr;
 	unsigned int tx_index;
 	unsigned int tx_max_entry;
 	unsigned int tx_max_frame;
@@ -862,6 +864,9 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
 	if (!nt->link_is_up)
 		cancel_delayed_work_sync(&nt->link_work);
 
+	for (i = 0; i < nt->mw_count; i++)
+		ntb_free_mw(nt, i);
+
 	/* The scratchpad registers keep the values if the remote side
 	 * goes down, blast them now to give them a sane value the next
 	 * time they are accessed
@@ -1049,6 +1054,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	tx_size = (unsigned int)mw_size / num_qps_mw;
 	qp_offset = tx_size * (qp_num / mw_count);
 
+	qp->tx_mw_size = tx_size;
 	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
 	if (!qp->tx_mw)
 		return -EINVAL;
@@ -1644,7 +1650,7 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 	dma_cookie_t cookie;
 
 	device = chan->device;
-	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
+	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
 	buff_off = (size_t)buf & ~PAGE_MASK;
 	dest_off = (size_t)dest & ~PAGE_MASK;
 
@@ -1863,6 +1869,18 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 		qp->rx_dma_chan = NULL;
 	}
 
+	if (qp->tx_dma_chan) {
+		qp->tx_mw_dma_addr =
+			dma_map_resource(qp->tx_dma_chan->device->dev,
+					 qp->tx_mw_phys, qp->tx_mw_size,
+					 DMA_FROM_DEVICE, 0);
+		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
+				      qp->tx_mw_dma_addr)) {
+			qp->tx_mw_dma_addr = 0;
+			goto err1;
+		}
+	}
+
 	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
 		qp->tx_dma_chan ? "DMA" : "CPU");
 
@@ -1904,6 +1922,10 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	qp->rx_alloc_entry = 0;
 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
+	if (qp->tx_mw_dma_addr)
+		dma_unmap_resource(qp->tx_dma_chan->device->dev,
+				   qp->tx_mw_dma_addr, qp->tx_mw_size,
+				   DMA_FROM_DEVICE, 0);
 	if (qp->tx_dma_chan)
 		dma_release_channel(qp->tx_dma_chan);
 	if (qp->rx_dma_chan)
@@ -1945,6 +1967,11 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 		 */
 		dma_sync_wait(chan, qp->last_cookie);
 		dmaengine_terminate_all(chan);
+
+		dma_unmap_resource(chan->device->dev,
+				   qp->tx_mw_dma_addr, qp->tx_mw_size,
+				   DMA_FROM_DEVICE, 0);
+
 		dma_release_channel(chan);
 	}
...
@@ -296,7 +296,8 @@ struct ntb_dev_ops {
 	int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
 
 	int (*peer_db_addr)(struct ntb_dev *ntb,
-			    phys_addr_t *db_addr, resource_size_t *db_size);
+			    phys_addr_t *db_addr, resource_size_t *db_size,
+			    u64 *db_data, int db_bit);
 	u64 (*peer_db_read)(struct ntb_dev *ntb);
 	int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
 	int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
@@ -1078,6 +1079,8 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
  * @ntb:	NTB device context.
  * @db_addr:	OUT - The address of the peer doorbell register.
  * @db_size:	OUT - The number of bytes to write the peer doorbell register.
+ * @db_data:	OUT - The data of peer doorbell register
+ * @db_bit:	door bell bit number
  *
  * Return the address of the peer doorbell register.  This may be used, for
  * example, by drivers that offload memory copy operations to a dma engine.
@@ -1091,12 +1094,13 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
  */
 static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
 				   phys_addr_t *db_addr,
-				   resource_size_t *db_size)
+				   resource_size_t *db_size,
+				   u64 *db_data, int db_bit)
 {
 	if (!ntb->ops->peer_db_addr)
 		return -EINVAL;
 
-	return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
+	return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit);
 }
 
 /**
...
@@ -248,9 +248,13 @@ struct ntb_ctrl_regs {
 		u32 win_size;
 		u64 xlate_addr;
 	} bar_entry[6];
-	u32 reserved2[216];
-	u32 req_id_table[256];
-	u32 reserved3[512];
+	struct {
+		u32 win_size;
+		u32 reserved[3];
+	} bar_ext_entry[6];
+	u32 reserved2[192];
+	u32 req_id_table[512];
+	u32 reserved3[256];
 	u64 lut_entry[512];
 } __packed;
...