Commit c156d289 authored by David S. Miller's avatar David S. Miller

Merge branch 'qed-Light-L2-updates'

Yuval Mintz says:

====================
qed*: Light L2 updates

This series does a major overhaul of the LL2 logic in qed.
The single biggest change done here is in #5 where we're changing
the API qed provides for LL2 [both internally in case of storage and
externally in case of RoCE] to become callback-based to allow cleaner
scalability in preparation for the future iWARP submission, which would
add additional flavors of LL2. It's also the only patch in the series
to modify !qed logic [qedr].

Patches prior to that mostly deal with refactoring LL2 code,
encapsulating various parameters into structures and re-ordering
of LL2 code. The latter patches add some small missing bits of LL2
functionality.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5189c555 fef1c3f7
...@@ -886,7 +886,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) ...@@ -886,7 +886,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
memcpy(&sgid->raw[8], guid, sizeof(guid)); memcpy(&sgid->raw[8], guid, sizeof(guid));
/* Update LL2 */ /* Update LL2 */
rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev, rc = dev->ops->ll2_set_mac_filter(dev->cdev,
dev->gsi_ll2_mac_address, dev->gsi_ll2_mac_address,
dev->ndev->dev_addr); dev->ndev->dev_addr);
......
...@@ -150,6 +150,8 @@ struct qedr_dev { ...@@ -150,6 +150,8 @@ struct qedr_dev {
u32 dp_module; u32 dp_module;
u8 dp_level; u8 dp_level;
u8 num_hwfns; u8 num_hwfns;
u8 gsi_ll2_handle;
uint wq_multiplier; uint wq_multiplier;
u8 gsi_ll2_mac_address[ETH_ALEN]; u8 gsi_ll2_mac_address[ETH_ALEN];
int gsi_qp_created; int gsi_qp_created;
......
...@@ -64,9 +64,14 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, ...@@ -64,9 +64,14 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
dev->gsi_qp = qp; dev->gsi_qp = qp;
} }
void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) void qedr_ll2_complete_tx_packet(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet)
{ {
struct qedr_dev *dev = (struct qedr_dev *)_qdev; struct qedr_dev *dev = (struct qedr_dev *)cxt;
struct qed_roce_ll2_packet *pkt = cookie;
struct qedr_cq *cq = dev->gsi_sqcq; struct qedr_cq *cq = dev->gsi_sqcq;
struct qedr_qp *qp = dev->gsi_qp; struct qedr_qp *qp = dev->gsi_qp;
unsigned long flags; unsigned long flags;
...@@ -88,20 +93,26 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) ...@@ -88,20 +93,26 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
} }
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, void qedr_ll2_complete_rx_packet(void *cxt,
struct qed_roce_ll2_rx_params *params) struct qed_ll2_comp_rx_data *data)
{ {
struct qedr_dev *dev = (struct qedr_dev *)_dev; struct qedr_dev *dev = (struct qedr_dev *)cxt;
struct qedr_cq *cq = dev->gsi_rqcq; struct qedr_cq *cq = dev->gsi_rqcq;
struct qedr_qp *qp = dev->gsi_qp; struct qedr_qp *qp = dev->gsi_qp;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&qp->q_lock, flags); spin_lock_irqsave(&qp->q_lock, flags);
qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc; qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id; -EINVAL : 0;
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len; qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac); /* note: length stands for data length i.e. GRH is excluded */
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
data->length.data_length;
*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
ntohl(data->opaque_data_0);
*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
ntohs((u16)data->opaque_data_1);
qedr_inc_sw_gsi_cons(&qp->rq); qedr_inc_sw_gsi_cons(&qp->rq);
...@@ -111,6 +122,14 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, ...@@ -111,6 +122,14 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
} }
/* LL2 rx_release callback for the GSI connection.
 *
 * Intentionally a no-op: the RX buffers posted to this connection come
 * from the consumer's recv WR sg_list (see qedr_gsi_post_recv), so the
 * driver owns no memory to reclaim when LL2 hands buffers back during
 * connection teardown.
 */
void qedr_ll2_release_rx_packet(void *cxt,
				u8 connection_handle,
				void *cookie,
				dma_addr_t rx_buf_addr, bool b_last_packet)
{
	/* Do nothing... */
}
static void qedr_destroy_gsi_cq(struct qedr_dev *dev, static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs) struct ib_qp_init_attr *attrs)
{ {
...@@ -159,27 +178,159 @@ static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev, ...@@ -159,27 +178,159 @@ static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
return 0; return 0;
} }
static int qedr_ll2_post_tx(struct qedr_dev *dev,
struct qed_roce_ll2_packet *pkt)
{
enum qed_ll2_roce_flavor_type roce_flavor;
struct qed_ll2_tx_pkt_info ll2_tx_pkt;
int rc;
int i;
memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
roce_flavor = (pkt->roce_mode == ROCE_V1) ?
QED_LL2_ROCE : QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
ll2_tx_pkt.enable_ip_cksum = 1;
ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
ll2_tx_pkt.vlan = 0;
ll2_tx_pkt.tx_dest = pkt->tx_dest;
ll2_tx_pkt.qed_roce_flavor = roce_flavor;
ll2_tx_pkt.first_frag = pkt->header.baddr;
ll2_tx_pkt.first_frag_len = pkt->header.len;
ll2_tx_pkt.cookie = pkt;
/* tx header */
rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
dev->gsi_ll2_handle,
&ll2_tx_pkt, 1);
if (rc) {
/* TX failed while posting header - release resources */
dma_free_coherent(&dev->pdev->dev, pkt->header.len,
pkt->header.vaddr, pkt->header.baddr);
kfree(pkt);
DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
return rc;
}
/* tx payload */
for (i = 0; i < pkt->n_seg; i++) {
rc = dev->ops->ll2_set_fragment_of_tx_packet(
dev->rdma_ctx,
dev->gsi_ll2_handle,
pkt->payload[i].baddr,
pkt->payload[i].len);
if (rc) {
/* if failed not much to do here, partial packet has
* been posted we can't free memory, will need to wait
* for completion
*/
DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
return rc;
}
}
return 0;
}
/* Tear down the GSI LL2 connection: remove the MAC filter, terminate
 * and release the connection, and mark the handle unused.
 *
 * Returns 0 if the handle was already unused, otherwise the result of
 * the terminate step. Teardown always runs to completion even if an
 * intermediate step fails.
 */
int qedr_ll2_stop(struct qedr_dev *dev)
{
	int rc;

	if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	/* remove LL2 MAC address filter */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address, NULL);
	if (rc)
		/* Best effort - log the failure but continue teardown;
		 * previously this return value was silently discarded.
		 */
		DP_ERR(dev, "Failed to remove LL2 MAC filter (rc=%d)\n", rc);

	rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
						dev->gsi_ll2_handle);
	if (rc)
		DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);

	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;

	return rc;
}
int qedr_ll2_start(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
{
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
int rc;
/* configure and start LL2 */
cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
cbs.rx_release_cb = qedr_ll2_release_rx_packet;
cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
cbs.cookie = dev;
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_ROCE;
data.input.mtu = dev->ndev->mtu;
data.input.rx_num_desc = attrs->cap.max_recv_wr;
data.input.rx_drop_ttl0_flg = true;
data.input.rx_vlan_removal_en = false;
data.input.tx_num_desc = attrs->cap.max_send_wr;
data.input.tx_tc = 0;
data.input.tx_dest = QED_LL2_TX_DEST_NW;
data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
data.input.gsi_enable = 1;
data.p_connection_handle = &dev->gsi_ll2_handle;
data.cbs = &cbs;
rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
if (rc) {
DP_ERR(dev,
"ll2 start: failed to acquire LL2 connection (rc=%d)\n",
rc);
return rc;
}
rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
dev->gsi_ll2_handle);
if (rc) {
DP_ERR(dev,
"ll2 start: failed to establish LL2 connection (rc=%d)\n",
rc);
goto err1;
}
rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
if (rc)
goto err2;
return 0;
err2:
dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
return rc;
}
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs, struct ib_qp_init_attr *attrs,
struct qedr_qp *qp) struct qedr_qp *qp)
{ {
struct qed_roce_ll2_params ll2_params;
int rc; int rc;
rc = qedr_check_gsi_qp_attrs(dev, attrs); rc = qedr_check_gsi_qp_attrs(dev, attrs);
if (rc) if (rc)
return ERR_PTR(rc); return ERR_PTR(rc);
/* configure and start LL2 */ rc = qedr_ll2_start(dev, attrs, qp);
memset(&ll2_params, 0, sizeof(ll2_params));
ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
ll2_params.cb_cookie = (void *)dev;
ll2_params.mtu = dev->ndev->mtu;
ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
if (rc) { if (rc) {
DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc); DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
return ERR_PTR(rc); return ERR_PTR(rc);
...@@ -214,7 +365,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, ...@@ -214,7 +365,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
err: err:
kfree(qp->rqe_wr_id); kfree(qp->rqe_wr_id);
rc = dev->ops->roce_ll2_stop(dev->cdev); rc = qedr_ll2_stop(dev);
if (rc) if (rc)
DP_ERR(dev, "create gsi qp: failed destroy on create\n"); DP_ERR(dev, "create gsi qp: failed destroy on create\n");
...@@ -223,15 +374,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, ...@@ -223,15 +374,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
int qedr_destroy_gsi_qp(struct qedr_dev *dev) int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{ {
int rc; return qedr_ll2_stop(dev);
rc = dev->ops->roce_ll2_stop(dev->cdev);
if (rc)
DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
else
DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
return rc;
} }
#define QEDR_MAX_UD_HEADER_SIZE (100) #define QEDR_MAX_UD_HEADER_SIZE (100)
...@@ -421,7 +564,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -421,7 +564,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{ {
struct qed_roce_ll2_packet *pkt = NULL; struct qed_roce_ll2_packet *pkt = NULL;
struct qedr_qp *qp = get_qedr_qp(ibqp); struct qedr_qp *qp = get_qedr_qp(ibqp);
struct qed_roce_ll2_tx_params params;
struct qedr_dev *dev = qp->dev; struct qedr_dev *dev = qp->dev;
unsigned long flags; unsigned long flags;
int rc; int rc;
...@@ -449,8 +591,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -449,8 +591,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto err; goto err;
} }
memset(&params, 0, sizeof(params));
spin_lock_irqsave(&qp->q_lock, flags); spin_lock_irqsave(&qp->q_lock, flags);
rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
...@@ -459,7 +599,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -459,7 +599,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto err; goto err;
} }
rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params); rc = qedr_ll2_post_tx(dev, pkt);
if (!rc) { if (!rc) {
qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
qedr_inc_sw_prod(&qp->sq); qedr_inc_sw_prod(&qp->sq);
...@@ -467,17 +608,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -467,17 +608,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
"gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n", "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
wr->opcode, in_irq(), irqs_disabled(), wr->wr_id); wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
} else { } else {
if (rc == QED_ROCE_TX_HEAD_FAILURE) {
/* TX failed while posting header - release resources */
dma_free_coherent(&dev->pdev->dev, pkt->header.len,
pkt->header.vaddr, pkt->header.baddr);
kfree(pkt);
} else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
/* NTD since TX failed while posting a fragment. We will
* release the resources on TX callback
*/
}
DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc); DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
rc = -EAGAIN; rc = -EAGAIN;
*bad_wr = wr; *bad_wr = wr;
...@@ -504,10 +634,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -504,10 +634,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
{ {
struct qedr_dev *dev = get_qedr_dev(ibqp->device); struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp); struct qedr_qp *qp = get_qedr_qp(ibqp);
struct qed_roce_ll2_buffer buf;
unsigned long flags; unsigned long flags;
int status = 0; int rc = 0;
int rc;
if ((qp->state != QED_ROCE_QP_STATE_RTR) && if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
(qp->state != QED_ROCE_QP_STATE_RTS)) { (qp->state != QED_ROCE_QP_STATE_RTS)) {
...@@ -518,8 +646,6 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -518,8 +646,6 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return -EINVAL; return -EINVAL;
} }
memset(&buf, 0, sizeof(buf));
spin_lock_irqsave(&qp->q_lock, flags); spin_lock_irqsave(&qp->q_lock, flags);
while (wr) { while (wr) {
...@@ -530,10 +656,12 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -530,10 +656,12 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
goto err; goto err;
} }
buf.baddr = wr->sg_list[0].addr; rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
buf.len = wr->sg_list[0].length; dev->gsi_ll2_handle,
wr->sg_list[0].addr,
rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1); wr->sg_list[0].length,
0 /* cookie */,
1 /* notify_fw */);
if (rc) { if (rc) {
DP_ERR(dev, DP_ERR(dev,
"gsi post recv: failed to post rx buffer (rc=%d)\n", "gsi post recv: failed to post rx buffer (rc=%d)\n",
...@@ -553,7 +681,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -553,7 +681,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_unlock_irqrestore(&qp->q_lock, flags); spin_unlock_irqrestore(&qp->q_lock, flags);
return status; return rc;
err: err:
spin_unlock_irqrestore(&qp->q_lock, flags); spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr; *bad_wr = wr;
......
...@@ -552,7 +552,6 @@ struct qed_hwfn { ...@@ -552,7 +552,6 @@ struct qed_hwfn {
#endif #endif
struct z_stream_s *stream; struct z_stream_s *stream;
struct qed_roce_ll2_info *ll2;
}; };
struct pci_params { struct pci_params {
......
...@@ -73,7 +73,6 @@ struct qed_cb_ll2_info { ...@@ -73,7 +73,6 @@ struct qed_cb_ll2_info {
int rx_cnt; int rx_cnt;
u32 rx_size; u32 rx_size;
u8 handle; u8 handle;
bool frags_mapped;
/* Lock protecting LL2 buffer lists in sleepless context */ /* Lock protecting LL2 buffer lists in sleepless context */
spinlock_t lock; spinlock_t lock;
...@@ -89,13 +88,14 @@ struct qed_ll2_buffer { ...@@ -89,13 +88,14 @@ struct qed_ll2_buffer {
dma_addr_t phys_addr; dma_addr_t phys_addr;
}; };
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn, static void qed_ll2b_complete_tx_packet(void *cxt,
u8 connection_handle, u8 connection_handle,
void *cookie, void *cookie,
dma_addr_t first_frag_addr, dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_fragment,
bool b_last_packet) bool b_last_packet)
{ {
struct qed_hwfn *p_hwfn = cxt;
struct qed_dev *cdev = p_hwfn->cdev; struct qed_dev *cdev = p_hwfn->cdev;
struct sk_buff *skb = cookie; struct sk_buff *skb = cookie;
...@@ -107,12 +107,6 @@ static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn, ...@@ -107,12 +107,6 @@ static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
b_last_fragment); b_last_fragment);
if (cdev->ll2->frags_mapped)
/* Case where mapped frags were received, need to
* free skb with nr_frags marked as 0
*/
skb_shinfo(skb)->nr_frags = 0;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
...@@ -164,42 +158,34 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) ...@@ -164,42 +158,34 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer); qed_ll2_dealloc_buffer(cdev, buffer);
} }
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
u8 connection_handle,
struct qed_ll2_rx_packet *p_pkt,
struct core_rx_fast_path_cqe *p_cqe,
bool b_last_packet)
{ {
u16 packet_length = le16_to_cpu(p_cqe->packet_length); struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_buffer *buffer = p_pkt->cookie; struct qed_ll2_buffer *buffer = data->cookie;
struct qed_dev *cdev = p_hwfn->cdev; struct qed_dev *cdev = p_hwfn->cdev;
u16 vlan = le16_to_cpu(p_cqe->vlan);
u32 opaque_data_0, opaque_data_1;
u8 pad = p_cqe->placement_offset;
dma_addr_t new_phys_addr; dma_addr_t new_phys_addr;
struct sk_buff *skb; struct sk_buff *skb;
bool reuse = false; bool reuse = false;
int rc = -EINVAL; int rc = -EINVAL;
u8 *new_data; u8 *new_data;
opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
DP_VERBOSE(p_hwfn, DP_VERBOSE(p_hwfn,
(NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
"Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n", "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
(u64)p_pkt->rx_buf_addr, pad, packet_length, (u64)data->rx_buf_addr,
le16_to_cpu(p_cqe->parse_flags.flags), vlan, data->u.placement_offset,
opaque_data_0, opaque_data_1); data->length.packet_length,
data->parse_flags,
data->vlan, data->opaque_data_0, data->opaque_data_1);
if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
print_hex_dump(KERN_INFO, "", print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_OFFSET, 16, 1, DUMP_PREFIX_OFFSET, 16, 1,
buffer->data, packet_length, false); buffer->data, data->length.packet_length, false);
} }
/* Determine if data is valid */ /* Determine if data is valid */
if (packet_length < ETH_HLEN) if (data->length.packet_length < ETH_HLEN)
reuse = true; reuse = true;
/* Allocate a replacement for buffer; Reuse upon failure */ /* Allocate a replacement for buffer; Reuse upon failure */
...@@ -219,9 +205,9 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, ...@@ -219,9 +205,9 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
goto out_post; goto out_post;
} }
pad += NET_SKB_PAD; data->u.placement_offset += NET_SKB_PAD;
skb_reserve(skb, pad); skb_reserve(skb, data->u.placement_offset);
skb_put(skb, packet_length); skb_put(skb, data->length.packet_length);
skb_checksum_none_assert(skb); skb_checksum_none_assert(skb);
/* Get parital ethernet information instead of eth_type_trans(), /* Get parital ethernet information instead of eth_type_trans(),
...@@ -232,10 +218,12 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, ...@@ -232,10 +218,12 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
/* Pass SKB onward */ /* Pass SKB onward */
if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
if (vlan) if (data->vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
data->vlan);
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
opaque_data_0, opaque_data_1); data->opaque_data_0,
data->opaque_data_1);
} }
/* Update Buffer information and update FW producer */ /* Update Buffer information and update FW producer */
...@@ -321,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -321,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry); list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq); b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer; struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
...@@ -333,16 +321,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -333,16 +321,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
b_last_frag = b_last_frag =
p_tx->cur_completing_bd_idx == p_pkt->bd_used; p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag; tx_frag = p_pkt->bds_set[0].tx_frag;
if (p_ll2_conn->conn.gsi_enable) p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->
my_id,
p_pkt->cookie,
tx_frag,
b_last_frag,
b_last_packet);
else
qed_ll2b_complete_tx_packet(p_hwfn,
p_ll2_conn->my_id, p_ll2_conn->my_id,
p_pkt->cookie, p_pkt->cookie,
tx_frag, tx_frag,
...@@ -360,7 +339,6 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) ...@@ -360,7 +339,6 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_tx_packet *p_pkt; struct qed_ll2_tx_packet *p_pkt;
bool b_last_frag = false; bool b_last_frag = false;
unsigned long flags; unsigned long flags;
dma_addr_t tx_frag;
int rc = -EINVAL; int rc = -EINVAL;
spin_lock_irqsave(&p_tx->lock, flags); spin_lock_irqsave(&p_tx->lock, flags);
...@@ -401,19 +379,13 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) ...@@ -401,19 +379,13 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
spin_unlock_irqrestore(&p_tx->lock, flags); spin_unlock_irqrestore(&p_tx->lock, flags);
tx_frag = p_pkt->bds_set[0].tx_frag;
if (p_ll2_conn->conn.gsi_enable) p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
qed_ll2b_complete_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
tx_frag,
b_last_frag, !num_bds);
else
qed_ll2b_complete_tx_packet(p_hwfn,
p_ll2_conn->my_id, p_ll2_conn->my_id,
p_pkt->cookie, p_pkt->cookie,
tx_frag, p_pkt->bds_set[0].tx_frag,
b_last_frag, !num_bds); b_last_frag, !num_bds);
spin_lock_irqsave(&p_tx->lock, flags); spin_lock_irqsave(&p_tx->lock, flags);
} }
...@@ -424,81 +396,71 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) ...@@ -424,81 +396,71 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
return rc; return rc;
} }
static int static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
union core_rx_cqe_union *p_cqe, union core_rx_cqe_union *p_cqe,
unsigned long lock_flags, bool b_last_cqe) struct qed_ll2_comp_rx_data *data)
{ {
struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue; data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
struct qed_ll2_rx_packet *p_pkt = NULL; data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
u16 packet_length, parse_flags, vlan; data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
u32 src_mac_addrhi; data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
u16 src_mac_addrlo; data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
if (!list_empty(&p_rx->active_descq)) }
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt) {
DP_NOTICE(p_hwfn,
"GSI Rx completion but active_descq is empty\n");
return -EIO;
}
list_del(&p_pkt->list_entry);
parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
spin_unlock_irqrestore(&p_rx->lock, lock_flags);
qed_ll2b_complete_rx_gsi_packet(p_hwfn,
p_ll2_info->my_id,
p_pkt->cookie,
p_pkt->rx_buf_addr,
packet_length,
p_cqe->rx_cqe_gsi.data_length_error,
parse_flags,
vlan,
src_mac_addrhi,
src_mac_addrlo, b_last_cqe);
spin_lock_irqsave(&p_rx->lock, lock_flags);
return 0; static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
union core_rx_cqe_union *p_cqe,
struct qed_ll2_comp_rx_data *data)
{
data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
data->length.packet_length =
le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
} }
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn, struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe, union core_rx_cqe_union *p_cqe,
unsigned long *p_lock_flags, unsigned long *p_lock_flags, bool b_last_cqe)
bool b_last_cqe)
{ {
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct qed_ll2_rx_packet *p_pkt = NULL; struct qed_ll2_rx_packet *p_pkt = NULL;
struct qed_ll2_comp_rx_data data;
if (!list_empty(&p_rx->active_descq)) if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq, p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry); struct qed_ll2_rx_packet, list_entry);
if (!p_pkt) { if (!p_pkt) {
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"LL2 Rx completion but active_descq is empty\n"); "[%d] LL2 Rx completion but active_descq is empty\n",
p_ll2_conn->input.conn_type);
return -EIO; return -EIO;
} }
list_del(&p_pkt->list_entry); list_del(&p_pkt->list_entry);
if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
else
qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n"); "Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
data.connection_handle = p_ll2_conn->my_id;
data.cookie = p_pkt->cookie;
data.rx_buf_addr = p_pkt->rx_buf_addr;
data.b_last_packet = b_last_cqe;
spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id, p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
spin_lock_irqsave(&p_rx->lock, *p_lock_flags); spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
return 0; return 0;
...@@ -506,7 +468,7 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, ...@@ -506,7 +468,7 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{ {
struct qed_ll2_info *p_ll2_conn = cookie; struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
union core_rx_cqe_union *cqe = NULL; union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0; u16 cq_new_idx = 0, cq_old_idx = 0;
...@@ -520,7 +482,9 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) ...@@ -520,7 +482,9 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
while (cq_new_idx != cq_old_idx) { while (cq_new_idx != cq_old_idx) {
bool b_last_cqe = (cq_new_idx == cq_old_idx); bool b_last_cqe = (cq_new_idx == cq_old_idx);
cqe = qed_chain_consume(&p_rx->rcq_chain); cqe =
(union core_rx_cqe_union *)
qed_chain_consume(&p_rx->rcq_chain);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
DP_VERBOSE(p_hwfn, DP_VERBOSE(p_hwfn,
...@@ -534,11 +498,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) ...@@ -534,11 +498,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
rc = -EINVAL; rc = -EINVAL;
break; break;
case CORE_RX_CQE_TYPE_GSI_OFFLOAD: case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
cqe, flags, b_last_cqe);
break;
case CORE_RX_CQE_TYPE_REGULAR: case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn, rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
cqe, &flags, cqe, &flags,
b_last_cqe); b_last_cqe);
break; break;
...@@ -564,10 +525,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -564,10 +525,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_rx = &p_ll2_conn->rx_queue; p_rx = &p_ll2_conn->rx_queue;
while (!list_empty(&p_rx->active_descq)) { while (!list_empty(&p_rx->active_descq)) {
dma_addr_t rx_buf_addr;
void *cookie;
bool b_last;
p_pkt = list_first_entry(&p_rx->active_descq, p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry); struct qed_ll2_rx_packet, list_entry);
if (!p_pkt) if (!p_pkt)
...@@ -575,22 +532,26 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -575,22 +532,26 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer; struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer); p_buffer);
} else { } else {
rx_buf_addr = p_pkt->rx_buf_addr; dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie; void *cookie = p_pkt->cookie;
bool b_last;
b_last = list_empty(&p_rx->active_descq); b_last = list_empty(&p_rx->active_descq);
p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
p_ll2_conn->my_id,
cookie,
rx_buf_addr, b_last);
} }
} }
} }
#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{ {
u8 bd_flags = 0; u8 bd_flags = 0;
...@@ -740,12 +701,13 @@ static void ...@@ -740,12 +701,13 @@ static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) struct qed_ll2_info *p_ll2_conn)
{ {
struct qed_ll2_tx_pkt_info tx_pkt;
struct qed_ooo_buffer *p_buffer; struct qed_ooo_buffer *p_buffer;
int rc;
u16 l4_hdr_offset_w; u16 l4_hdr_offset_w;
dma_addr_t first_frag; dma_addr_t first_frag;
u16 parse_flags; u16 parse_flags;
u8 bd_flags; u8 bd_flags;
int rc;
/* Submit Tx buffers here */ /* Submit Tx buffers here */
while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
...@@ -760,13 +722,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, ...@@ -760,13 +722,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, memset(&tx_pkt, 0, sizeof(tx_pkt));
p_buffer->vlan, bd_flags, tx_pkt.num_of_bds = 1;
l4_hdr_offset_w, tx_pkt.vlan = p_buffer->vlan;
p_ll2_conn->conn.tx_dest, 0, tx_pkt.bd_flags = bd_flags;
first_frag, tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
p_buffer->packet_length, tx_pkt.tx_dest = p_ll2_conn->tx_dest;
p_buffer, true); tx_pkt.first_frag = first_frag;
tx_pkt.first_frag_len = p_buffer->packet_length;
tx_pkt.cookie = p_buffer;
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
&tx_pkt, true);
if (rc) { if (rc) {
qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer, false); p_buffer, false);
...@@ -873,85 +840,6 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) ...@@ -873,85 +840,6 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
return 0; return 0;
} }
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
u16 rx_num_ooo_buffers, u16 mtu)
{
struct qed_ooo_buffer *p_buf = NULL;
void *p_virt;
u16 buf_idx;
int rc = 0;
if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return rc;
if (!rx_num_ooo_buffers)
return -EINVAL;
for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
if (!p_buf) {
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
ETH_CACHE_LINE_SIZE - 1) &
~(ETH_CACHE_LINE_SIZE - 1);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_buf->rx_buffer_size,
&p_buf->rx_buffer_phys_addr,
GFP_KERNEL);
if (!p_virt) {
kfree(p_buf);
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_virt_addr = p_virt;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
}
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
rx_num_ooo_buffers, p_buf->rx_buffer_size);
out:
return rc;
}
static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
struct qed_ooo_buffer *p_buffer;
if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr);
kfree(p_buffer);
}
}
static void qed_ll2_stop_ooo(struct qed_dev *cdev) static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{ {
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
...@@ -965,69 +853,11 @@ static void qed_ll2_stop_ooo(struct qed_dev *cdev) ...@@ -965,69 +853,11 @@ static void qed_ll2_stop_ooo(struct qed_dev *cdev)
*handle = QED_LL2_UNUSED_HANDLE; *handle = QED_LL2_UNUSED_HANDLE;
} }
static int qed_ll2_start_ooo(struct qed_dev *cdev,
struct qed_ll2_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_conn ll2_info = { 0 };
int rc;
ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
ll2_info.mtu = params->mtu;
ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info.tx_tc = OOO_LB_TC;
ll2_info.tx_dest = CORE_TX_DEST_LB;
rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
handle);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
goto out;
}
rc = qed_ll2_establish_connection(hwfn, *handle);
if (rc) {
DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
goto fail;
}
return 0;
fail:
qed_ll2_release_connection(hwfn, *handle);
out:
*handle = QED_LL2_UNUSED_HANDLE;
return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info,
u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
struct qed_ll2_params *params)
{ return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn, struct qed_ll2_info *p_ll2_conn,
u8 action_on_error) u8 action_on_error)
{ {
enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_start_ramrod_data *p_ramrod = NULL; struct core_rx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
...@@ -1053,16 +883,15 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -1053,16 +883,15 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_rx->rx_sb_index; p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1; p_ramrod->complete_event_flg = 1;
p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
DMA_REGPAIR_LE(p_ramrod->bd_base, DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
p_rx->rxq_chain.p_phys_addr);
cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
qed_chain_get_pbl_phys(&p_rx->rcq_chain)); qed_chain_get_pbl_phys(&p_rx->rcq_chain));
p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg; p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en; p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
: 1; : 1;
...@@ -1077,14 +906,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -1077,14 +906,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
} }
p_ramrod->action_on_error.error_type = action_on_error; p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) struct qed_ll2_info *p_ll2_conn)
{ {
enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct core_tx_start_ramrod_data *p_ramrod = NULL; struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
...@@ -1095,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -1095,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0; return 0;
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
p_ll2_conn->tx_stats_en = 0; p_ll2_conn->tx_stats_en = 0;
else else
p_ll2_conn->tx_stats_en = 1; p_ll2_conn->tx_stats_en = 1;
...@@ -1116,7 +945,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -1116,7 +945,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index; p_ramrod->sb_index = p_tx->tx_sb_index;
p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id; p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
...@@ -1125,7 +954,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -1125,7 +954,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
p_ramrod->pbl_size = cpu_to_le16(pbl_size); p_ramrod->pbl_size = cpu_to_le16(pbl_size);
switch (p_ll2_conn->conn.tx_tc) { switch (p_ll2_conn->input.tx_tc) {
case LB_TC: case LB_TC:
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
break; break;
...@@ -1155,7 +984,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -1155,7 +984,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
} }
p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
...@@ -1211,20 +1041,20 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, ...@@ -1211,20 +1041,20 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
static int static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info, u16 rx_num_desc) struct qed_ll2_info *p_ll2_info)
{ {
struct qed_ll2_rx_packet *p_descq; struct qed_ll2_rx_packet *p_descq;
u32 capacity; u32 capacity;
int rc = 0; int rc = 0;
if (!rx_num_desc) if (!p_ll2_info->input.rx_num_desc)
goto out; goto out;
rc = qed_chain_alloc(p_hwfn->cdev, rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR, QED_CHAIN_MODE_NEXT_PTR,
QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_CNT_TYPE_U16,
rx_num_desc, p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_bd), sizeof(struct core_rx_bd),
&p_ll2_info->rx_queue.rxq_chain); &p_ll2_info->rx_queue.rxq_chain);
if (rc) { if (rc) {
...@@ -1246,7 +1076,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, ...@@ -1246,7 +1076,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL, QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_CNT_TYPE_U16,
rx_num_desc, p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_fast_path_cqe), sizeof(struct core_rx_fast_path_cqe),
&p_ll2_info->rx_queue.rcq_chain); &p_ll2_info->rx_queue.rcq_chain);
if (rc) { if (rc) {
...@@ -1256,28 +1086,27 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, ...@@ -1256,28 +1086,27 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2, DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
p_ll2_info->conn.conn_type, rx_num_desc); p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
out: out:
return rc; return rc;
} }
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info, struct qed_ll2_info *p_ll2_info)
u16 tx_num_desc)
{ {
struct qed_ll2_tx_packet *p_descq; struct qed_ll2_tx_packet *p_descq;
u32 capacity; u32 capacity;
int rc = 0; int rc = 0;
if (!tx_num_desc) if (!p_ll2_info->input.tx_num_desc)
goto out; goto out;
rc = qed_chain_alloc(p_hwfn->cdev, rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL, QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_CNT_TYPE_U16,
tx_num_desc, p_ll2_info->input.tx_num_desc,
sizeof(struct core_tx_bd), sizeof(struct core_tx_bd),
&p_ll2_info->tx_queue.txq_chain); &p_ll2_info->tx_queue.txq_chain);
if (rc) if (rc)
...@@ -1294,28 +1123,112 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, ...@@ -1294,28 +1123,112 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2, DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
p_ll2_info->conn.conn_type, tx_num_desc); p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
out: out:
if (rc) if (rc)
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Can't allocate memory for Tx LL2 with 0x%08x buffers\n", "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
tx_num_desc); p_ll2_info->input.tx_num_desc);
return rc;
}
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info, u16 mtu)
{
struct qed_ooo_buffer *p_buf = NULL;
void *p_virt;
u16 buf_idx;
int rc = 0;
if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return rc; return rc;
/* Correct number of requested OOO buffers if needed */
if (!p_ll2_info->input.rx_num_ooo_buffers) {
u16 num_desc = p_ll2_info->input.rx_num_desc;
if (!num_desc)
return -EINVAL;
p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
}
for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
buf_idx++) {
p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
if (!p_buf) {
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
ETH_CACHE_LINE_SIZE - 1) &
~(ETH_CACHE_LINE_SIZE - 1);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_buf->rx_buffer_size,
&p_buf->rx_buffer_phys_addr,
GFP_KERNEL);
if (!p_virt) {
kfree(p_buf);
rc = -ENOMEM;
goto out;
}
p_buf->rx_buffer_virt_addr = p_virt;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
}
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
out:
return rc;
}
static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
if (!cbs || (!cbs->rx_comp_cb ||
!cbs->rx_release_cb ||
!cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
return -EINVAL;
p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
p_ll2_info->cbs.cookie = cbs->cookie;
return 0;
} }
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, static enum core_error_handle
struct qed_ll2_conn *p_params, qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle)
{ {
switch (err) {
case QED_LL2_DROP_PACKET:
return LL2_DROP_PACKET;
case QED_LL2_DO_NOTHING:
return LL2_DO_NOTHING;
case QED_LL2_ASSERT:
return LL2_ASSERT;
default:
return LL2_DO_NOTHING;
}
}
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL; struct qed_ll2_info *p_ll2_info = NULL;
u8 i, *p_tx_max;
int rc; int rc;
u8 i;
if (!p_connection_handle || !p_hwfn->p_ll2_info) if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL; return -EINVAL;
/* Find a free connection to be used */ /* Find a free connection to be used */
...@@ -1334,23 +1247,40 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, ...@@ -1334,23 +1247,40 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (!p_ll2_info) if (!p_ll2_info)
return -EBUSY; return -EBUSY;
p_ll2_info->conn = *p_params; memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
CORE_TX_DEST_NW : CORE_TX_DEST_LB;
/* Correct maximum number of Tx BDs */
p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
if (*p_tx_max == 0)
*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
else
*p_tx_max = min_t(u8, *p_tx_max,
CORE_LL2_TX_MAX_BDS_PER_PACKET);
rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
if (rc) {
DP_NOTICE(p_hwfn, "Invalid callback functions\n");
goto q_allocate_fail;
}
rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
if (rc) if (rc)
goto q_allocate_fail; goto q_allocate_fail;
rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc); rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
if (rc) if (rc)
goto q_allocate_fail; goto q_allocate_fail;
rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
rx_num_desc * 2, p_params->mtu); data->input.mtu);
if (rc) if (rc)
goto q_allocate_fail; goto q_allocate_fail;
/* Register callbacks for the Rx/Tx queues */ /* Register callbacks for the Rx/Tx queues */
if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) { if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
comp_rx_cb = qed_ll2_lb_rxq_completion; comp_rx_cb = qed_ll2_lb_rxq_completion;
comp_tx_cb = qed_ll2_lb_txq_completion; comp_tx_cb = qed_ll2_lb_txq_completion;
} else { } else {
...@@ -1358,7 +1288,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, ...@@ -1358,7 +1288,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
comp_tx_cb = qed_ll2_txq_completion; comp_tx_cb = qed_ll2_txq_completion;
} }
if (rx_num_desc) { if (data->input.rx_num_desc) {
qed_int_register_cb(p_hwfn, comp_rx_cb, qed_int_register_cb(p_hwfn, comp_rx_cb,
&p_hwfn->p_ll2_info[i], &p_hwfn->p_ll2_info[i],
&p_ll2_info->rx_queue.rx_sb_index, &p_ll2_info->rx_queue.rx_sb_index,
...@@ -1366,7 +1296,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, ...@@ -1366,7 +1296,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
p_ll2_info->rx_queue.b_cb_registred = true; p_ll2_info->rx_queue.b_cb_registred = true;
} }
if (tx_num_desc) { if (data->input.tx_num_desc) {
qed_int_register_cb(p_hwfn, qed_int_register_cb(p_hwfn,
comp_tx_cb, comp_tx_cb,
&p_hwfn->p_ll2_info[i], &p_hwfn->p_ll2_info[i],
...@@ -1375,7 +1305,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, ...@@ -1375,7 +1305,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
p_ll2_info->tx_queue.b_cb_registred = true; p_ll2_info->tx_queue.b_cb_registred = true;
} }
*p_connection_handle = i; *data->p_connection_handle = i;
return rc; return rc;
q_allocate_fail: q_allocate_fail:
...@@ -1386,24 +1316,39 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, ...@@ -1386,24 +1316,39 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) struct qed_ll2_info *p_ll2_conn)
{ {
enum qed_ll2_error_handle error_input;
enum core_error_handle error_mode;
u8 action_on_error = 0; u8 action_on_error = 0;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0; return 0;
DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
error_input = p_ll2_conn->input.ai_err_packet_too_big;
SET_FIELD(action_on_error, error_mode = qed_ll2_get_error_choice(error_input);
CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
p_ll2_conn->conn.ai_err_packet_too_big);
SET_FIELD(action_on_error, SET_FIELD(action_on_error,
CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf); CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
error_input = p_ll2_conn->input.ai_err_no_buf;
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
} }
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{ {
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn; struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx; struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx; struct qed_ll2_tx_queue *p_tx;
...@@ -1481,7 +1426,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -1481,7 +1426,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_add_protocol_filter(p_hwfn, p_ptt, qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0, 0x8906, 0,
QED_LLH_FILTER_ETHERTYPE); QED_LLH_FILTER_ETHERTYPE);
...@@ -1530,11 +1475,12 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, ...@@ -1530,11 +1475,12 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
} }
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle, u8 connection_handle,
dma_addr_t addr, dma_addr_t addr,
u16 buf_len, void *cookie, u8 notify_fw) u16 buf_len, void *cookie, u8 notify_fw)
{ {
struct qed_hwfn *p_hwfn = cxt;
struct core_rx_bd_with_buff_len *p_curb = NULL; struct core_rx_bd_with_buff_len *p_curb = NULL;
struct qed_ll2_rx_packet *p_curp = NULL; struct qed_ll2_rx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn; struct qed_ll2_info *p_ll2_conn;
...@@ -1593,20 +1539,18 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, ...@@ -1593,20 +1539,18 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
struct qed_ll2_tx_queue *p_tx, struct qed_ll2_tx_queue *p_tx,
struct qed_ll2_tx_packet *p_curp, struct qed_ll2_tx_packet *p_curp,
u8 num_of_bds, struct qed_ll2_tx_pkt_info *pkt,
dma_addr_t first_frag,
u16 first_frag_len, void *p_cookie,
u8 notify_fw) u8 notify_fw)
{ {
list_del(&p_curp->list_entry); list_del(&p_curp->list_entry);
p_curp->cookie = p_cookie; p_curp->cookie = pkt->cookie;
p_curp->bd_used = num_of_bds; p_curp->bd_used = pkt->num_of_bds;
p_curp->notify_fw = notify_fw; p_curp->notify_fw = notify_fw;
p_tx->cur_send_packet = p_curp; p_tx->cur_send_packet = p_curp;
p_tx->cur_send_frag_num = 0; p_tx->cur_send_frag_num = 0;
p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag; p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len; p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
p_tx->cur_send_frag_num++; p_tx->cur_send_frag_num++;
} }
...@@ -1614,51 +1558,52 @@ static void ...@@ -1614,51 +1558,52 @@ static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2, struct qed_ll2_info *p_ll2,
struct qed_ll2_tx_packet *p_curp, struct qed_ll2_tx_packet *p_curp,
u8 num_of_bds, struct qed_ll2_tx_pkt_info *pkt)
enum core_tx_dest tx_dest,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum core_roce_flavor_type roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len)
{ {
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL; struct core_tx_bd *start_bd = NULL;
enum core_roce_flavor_type roce_flavor;
enum core_tx_dest tx_dest;
u16 bd_data = 0, frag_idx; u16 bd_data = 0, frag_idx;
roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
: CORE_RROCE;
tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
: CORE_TX_DEST_LB;
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan); start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(l4_hdr_offset_w)); cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
bd_data |= bd_flags; bd_data |= pkt->bd_flags;
SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds); SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, first_frag); DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len); start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
DP_VERBOSE(p_hwfn, DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2), (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
p_ll2->queue_id, p_ll2->queue_id,
p_ll2->cid, p_ll2->cid,
p_ll2->conn.conn_type, p_ll2->input.conn_type,
prod_idx, prod_idx,
first_frag_len, pkt->first_frag_len,
num_of_bds, pkt->num_of_bds,
le32_to_cpu(start_bd->addr.hi), le32_to_cpu(start_bd->addr.hi),
le32_to_cpu(start_bd->addr.lo)); le32_to_cpu(start_bd->addr.lo));
if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds) if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
return; return;
/* Need to provide the packet with additional BDs for frags */ /* Need to provide the packet with additional BDs for frags */
for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
frag_idx < num_of_bds; frag_idx++) { frag_idx < pkt->num_of_bds; frag_idx++) {
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
...@@ -1721,26 +1666,20 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, ...@@ -1721,26 +1666,20 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2), (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id, p_ll2_conn->queue_id,
p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod); p_ll2_conn->cid,
p_ll2_conn->input.conn_type, db_msg.spq_prod);
} }
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle, u8 connection_handle,
u8 num_of_bds, struct qed_ll2_tx_pkt_info *pkt,
u16 vlan, bool notify_fw)
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw)
{ {
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_tx_packet *p_curp = NULL; struct qed_ll2_tx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_info *p_ll2_conn = NULL;
enum core_roce_flavor_type roce_flavor;
struct qed_ll2_tx_queue *p_tx; struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain; struct qed_chain *p_tx_chain;
enum core_tx_dest tx_dest;
unsigned long flags; unsigned long flags;
int rc = 0; int rc = 0;
...@@ -1750,7 +1689,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, ...@@ -1750,7 +1689,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
p_tx = &p_ll2_conn->tx_queue; p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain; p_tx_chain = &p_tx->txq_chain;
if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET) if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
return -EIO; return -EIO;
spin_lock_irqsave(&p_tx->lock, flags); spin_lock_irqsave(&p_tx->lock, flags);
...@@ -1763,7 +1702,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, ...@@ -1763,7 +1702,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
if (!list_empty(&p_tx->free_descq)) if (!list_empty(&p_tx->free_descq))
p_curp = list_first_entry(&p_tx->free_descq, p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry); struct qed_ll2_tx_packet, list_entry);
if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds) if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
p_curp = NULL; p_curp = NULL;
if (!p_curp) { if (!p_curp) {
...@@ -1771,26 +1710,10 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, ...@@ -1771,26 +1710,10 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
goto out; goto out;
} }
tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
CORE_TX_DEST_LB;
if (qed_roce_flavor == QED_LL2_ROCE) {
roce_flavor = CORE_ROCE;
} else if (qed_roce_flavor == QED_LL2_RROCE) {
roce_flavor = CORE_RROCE;
} else {
rc = -EINVAL;
goto out;
}
/* Prepare packet and BD, and perhaps send a doorbell to FW */ /* Prepare packet and BD, and perhaps send a doorbell to FW */
qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
num_of_bds, first_frag,
first_frag_len, cookie, notify_fw); qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
num_of_bds, tx_dest,
vlan, bd_flags, l4_hdr_offset_w,
roce_flavor,
first_frag, first_frag_len);
qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
...@@ -1799,11 +1722,12 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, ...@@ -1799,11 +1722,12 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle, u8 connection_handle,
dma_addr_t addr, u16 nbytes) dma_addr_t addr, u16 nbytes)
{ {
struct qed_ll2_tx_packet *p_cur_send_packet = NULL; struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_info *p_ll2_conn = NULL;
u16 cur_send_frag_num = 0; u16 cur_send_frag_num = 0;
struct core_tx_bd *p_bd; struct core_tx_bd *p_bd;
...@@ -1838,8 +1762,9 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, ...@@ -1838,8 +1762,9 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{ {
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_info *p_ll2_conn = NULL;
int rc = -EINVAL; int rc = -EINVAL;
struct qed_ptt *p_ptt; struct qed_ptt *p_ptt;
...@@ -1869,10 +1794,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -1869,10 +1794,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle); qed_ll2_rxq_flush(p_hwfn, connection_handle);
} }
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_remove_protocol_filter(p_hwfn, p_ptt, qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0, 0x8906, 0,
QED_LLH_FILTER_ETHERTYPE); QED_LLH_FILTER_ETHERTYPE);
...@@ -1886,8 +1811,28 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -1886,8 +1811,28 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
return rc; return rc;
} }
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{ {
struct qed_ooo_buffer *p_buffer;
if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
p_hwfn->p_ooo_info))) {
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
p_buffer->rx_buffer_virt_addr,
p_buffer->rx_buffer_phys_addr);
kfree(p_buffer);
}
}
void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_info *p_ll2_conn = NULL;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
...@@ -1957,6 +1902,27 @@ void qed_ll2_free(struct qed_hwfn *p_hwfn) ...@@ -1957,6 +1902,27 @@ void qed_ll2_free(struct qed_hwfn *p_hwfn)
p_hwfn->p_ll2_info = NULL; p_hwfn->p_ll2_info = NULL;
} }
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_ll2_stats *p_stats)
{
struct core_ll2_port_stats port_stats;
memset(&port_stats, 0, sizeof(port_stats));
qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
BAR0_MAP_REG_TSDM_RAM +
TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
sizeof(port_stats));
p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
p_stats->gsi_invalid_pkt_length =
HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
p_stats->gsi_unsupported_pkt_typ =
HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
p_stats->gsi_crcchksm_error =
HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn, static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
struct qed_ll2_info *p_ll2_conn, struct qed_ll2_info *p_ll2_conn,
...@@ -2020,9 +1986,10 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, ...@@ -2020,9 +1986,10 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts); p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
} }
int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats) u8 connection_handle, struct qed_ll2_stats *p_stats)
{ {
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ptt *p_ptt; struct qed_ptt *p_ptt;
...@@ -2040,6 +2007,8 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, ...@@ -2040,6 +2007,8 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
return -EINVAL; return -EINVAL;
} }
if (p_ll2_conn->input.gsi_enable)
_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
if (p_ll2_conn->tx_stats_en) if (p_ll2_conn->tx_stats_en)
...@@ -2049,6 +2018,17 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, ...@@ -2049,6 +2018,17 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
static void qed_ll2b_release_rx_packet(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
bool b_last_packet)
{
struct qed_hwfn *p_hwfn = cxt;
qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}
static void qed_ll2_register_cb_ops(struct qed_dev *cdev, static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops, const struct qed_ll2_cb_ops *ops,
void *cookie) void *cookie)
...@@ -2057,21 +2037,86 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, ...@@ -2057,21 +2037,86 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
cdev->ll2->cb_cookie = cookie; cdev->ll2->cb_cookie = cookie;
} }
struct qed_ll2_cbs ll2_cbs = {
.rx_comp_cb = &qed_ll2b_complete_rx_packet,
.rx_release_cb = &qed_ll2b_release_rx_packet,
.tx_comp_cb = &qed_ll2b_complete_tx_packet,
.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
static void qed_ll2_set_conn_data(struct qed_dev *cdev,
struct qed_ll2_acquire_data *data,
struct qed_ll2_params *params,
enum qed_ll2_conn_type conn_type,
u8 *handle, bool lb)
{
memset(data, 0, sizeof(*data));
data->input.conn_type = conn_type;
data->input.mtu = params->mtu;
data->input.rx_num_desc = QED_LL2_RX_SIZE;
data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
data->input.tx_num_desc = QED_LL2_TX_SIZE;
data->p_connection_handle = handle;
data->cbs = &ll2_cbs;
ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
if (lb) {
data->input.tx_tc = OOO_LB_TC;
data->input.tx_dest = QED_LL2_TX_DEST_LB;
} else {
data->input.tx_tc = 0;
data->input.tx_dest = QED_LL2_TX_DEST_NW;
}
}
static int qed_ll2_start_ooo(struct qed_dev *cdev,
struct qed_ll2_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_acquire_data data;
int rc;
qed_ll2_set_conn_data(cdev, &data, params,
QED_LL2_TYPE_ISCSI_OOO, handle, true);
rc = qed_ll2_acquire_connection(hwfn, &data);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
goto out;
}
rc = qed_ll2_establish_connection(hwfn, *handle);
if (rc) {
DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
goto fail;
}
return 0;
fail:
qed_ll2_release_connection(hwfn, *handle);
out:
*handle = QED_LL2_UNUSED_HANDLE;
return rc;
}
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{ {
struct qed_ll2_conn ll2_info;
struct qed_ll2_buffer *buffer, *tmp_buffer; struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type; enum qed_ll2_conn_type conn_type;
struct qed_ll2_acquire_data data;
struct qed_ptt *p_ptt; struct qed_ptt *p_ptt;
int rc, i; int rc, i;
u8 gsi_enable = 1;
/* Initialize LL2 locks & lists */ /* Initialize LL2 locks & lists */
INIT_LIST_HEAD(&cdev->ll2->list); INIT_LIST_HEAD(&cdev->ll2->list);
spin_lock_init(&cdev->ll2->lock); spin_lock_init(&cdev->ll2->lock);
cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN + cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
L1_CACHE_BYTES + params->mtu; L1_CACHE_BYTES + params->mtu;
cdev->ll2->frags_mapped = params->frags_mapped;
/*Allocate memory for LL2 */ /*Allocate memory for LL2 */
DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n", DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
...@@ -2096,11 +2141,9 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) ...@@ -2096,11 +2141,9 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
case QED_PCI_FCOE: case QED_PCI_FCOE:
conn_type = QED_LL2_TYPE_FCOE; conn_type = QED_LL2_TYPE_FCOE;
gsi_enable = 0;
break; break;
case QED_PCI_ISCSI: case QED_PCI_ISCSI:
conn_type = QED_LL2_TYPE_ISCSI; conn_type = QED_LL2_TYPE_ISCSI;
gsi_enable = 0;
break; break;
case QED_PCI_ETH_ROCE: case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE; conn_type = QED_LL2_TYPE_ROCE;
...@@ -2109,20 +2152,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) ...@@ -2109,20 +2152,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
conn_type = QED_LL2_TYPE_TEST; conn_type = QED_LL2_TYPE_TEST;
} }
/* Prepare the temporary ll2 information */ qed_ll2_set_conn_data(cdev, &data, params, conn_type,
memset(&ll2_info, 0, sizeof(ll2_info)); &cdev->ll2->handle, false);
ll2_info.conn_type = conn_type;
ll2_info.mtu = params->mtu;
ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
ll2_info.tx_tc = 0;
ll2_info.tx_dest = CORE_TX_DEST_NW;
ll2_info.gsi_enable = gsi_enable;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info, rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
&cdev->ll2->handle);
if (rc) { if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 connection\n"); DP_INFO(cdev, "Failed to acquire LL2 connection\n");
goto fail; goto fail;
...@@ -2245,6 +2278,7 @@ static int qed_ll2_stop(struct qed_dev *cdev) ...@@ -2245,6 +2278,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{ {
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag; const skb_frag_t *frag;
int rc = -EINVAL, i; int rc = -EINVAL, i;
dma_addr_t mapping; dma_addr_t mapping;
...@@ -2279,33 +2313,31 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) ...@@ -2279,33 +2313,31 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
} }
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), memset(&pkt, 0, sizeof(pkt));
cdev->ll2->handle, pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
1 + skb_shinfo(skb)->nr_frags, pkt.vlan = vlan;
vlan, flags, 0, QED_LL2_TX_DEST_NW, pkt.bd_flags = flags;
0 /* RoCE FLAVOR */, pkt.tx_dest = QED_LL2_TX_DEST_NW;
mapping, skb->len, skb, 1); pkt.first_frag = mapping;
pkt.first_frag_len = skb->len;
pkt.cookie = skb;
rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
&pkt, 1);
if (rc) if (rc)
goto err; goto err;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i]; frag = &skb_shinfo(skb)->frags[i];
if (!cdev->ll2->frags_mapped) {
mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
skb_frag_size(frag), skb_frag_size(frag), DMA_TO_DEVICE);
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&cdev->pdev->dev, if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
mapping))) {
DP_NOTICE(cdev, DP_NOTICE(cdev,
"Unable to map frag - dropping packet\n"); "Unable to map frag - dropping packet\n");
rc = -ENOMEM;
goto err; goto err;
} }
} else {
mapping = page_to_phys(skb_frag_page(frag)) |
frag->page_offset;
}
rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev), rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
cdev->ll2->handle, cdev->ll2->handle,
......
...@@ -47,29 +47,6 @@ ...@@ -47,29 +47,6 @@
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4) #define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
enum qed_ll2_roce_flavor_type {
QED_LL2_ROCE,
QED_LL2_RROCE,
MAX_QED_LL2_ROCE_FLAVOR_TYPE
};
enum qed_ll2_conn_type {
QED_LL2_TYPE_FCOE,
QED_LL2_TYPE_ISCSI,
QED_LL2_TYPE_TEST,
QED_LL2_TYPE_ISCSI_OOO,
QED_LL2_TYPE_RESERVED2,
QED_LL2_TYPE_ROCE,
QED_LL2_TYPE_RESERVED3,
MAX_QED_LL2_RX_CONN_TYPE
};
enum qed_ll2_tx_dest {
QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
QED_LL2_TX_DEST_MAX
};
struct qed_ll2_rx_packet { struct qed_ll2_rx_packet {
struct list_head list_entry; struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd; struct core_rx_bd_with_buff_len *rxq_bd;
...@@ -135,30 +112,21 @@ struct qed_ll2_tx_queue { ...@@ -135,30 +112,21 @@ struct qed_ll2_tx_queue {
bool b_completing_packet; bool b_completing_packet;
}; };
struct qed_ll2_conn {
enum qed_ll2_conn_type conn_type;
u16 mtu;
u8 rx_drop_ttl0_flg;
u8 rx_vlan_removal_en;
u8 tx_tc;
enum core_tx_dest tx_dest;
enum core_error_handle ai_err_packet_too_big;
enum core_error_handle ai_err_no_buf;
u8 gsi_enable;
};
struct qed_ll2_info { struct qed_ll2_info {
/* Lock protecting the state of LL2 */ /* Lock protecting the state of LL2 */
struct mutex mutex; struct mutex mutex;
struct qed_ll2_conn conn;
struct qed_ll2_acquire_data_inputs input;
u32 cid; u32 cid;
u8 my_id; u8 my_id;
u8 queue_id; u8 queue_id;
u8 tx_stats_id; u8 tx_stats_id;
bool b_active; bool b_active;
enum core_tx_dest tx_dest;
u8 tx_stats_en; u8 tx_stats_en;
struct qed_ll2_rx_queue rx_queue; struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue; struct qed_ll2_tx_queue tx_queue;
struct qed_ll2_cbs cbs;
}; };
/** /**
...@@ -166,38 +134,30 @@ struct qed_ll2_info { ...@@ -166,38 +134,30 @@ struct qed_ll2_info {
* starts rx & tx (if relevant) queues pair. Provides * starts rx & tx (if relevant) queues pair. Provides
* connecion handler as output parameter. * connecion handler as output parameter.
* *
* @param p_hwfn
* @param p_params Contain various configuration properties
* @param rx_num_desc
* @param tx_num_desc
*
* @param p_connection_handle Output container for LL2 connection's handle
* *
* @return 0 on success, failure otherwise * @param cxt - pointer to the hw-function [opaque to some]
* @param data - describes connection parameters
* @return int
*/ */
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
struct qed_ll2_conn *p_params,
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle);
/** /**
* @brief qed_ll2_establish_connection - start previously * @brief qed_ll2_establish_connection - start previously
* allocated LL2 queues pair * allocated LL2 queues pair
* *
* @param p_hwfn * @param cxt - pointer to the hw-function [opaque to some]
* @param p_ptt * @param p_ptt
* @param connection_handle LL2 connection's handle obtained from * @param connection_handle LL2 connection's handle obtained from
* qed_ll2_require_connection * qed_ll2_require_connection
* *
* @return 0 on success, failure otherwise * @return 0 on success, failure otherwise
*/ */
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
/** /**
* @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue. * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
* *
* @param p_hwfn * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle obtained from * @param connection_handle LL2 connection's handle obtained from
* qed_ll2_require_connection * qed_ll2_require_connection
* @param addr rx (physical address) buffers to submit * @param addr rx (physical address) buffers to submit
...@@ -206,7 +166,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); ...@@ -206,7 +166,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
* *
* @return 0 on success, failure otherwise * @return 0 on success, failure otherwise
*/ */
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle, u8 connection_handle,
dma_addr_t addr, dma_addr_t addr,
u16 buf_len, void *cookie, u8 notify_fw); u16 buf_len, void *cookie, u8 notify_fw);
...@@ -215,53 +175,34 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, ...@@ -215,53 +175,34 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
* @brief qed_ll2_prepare_tx_packet - request for start Tx BD * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
* to prepare Tx packet submission to FW. * to prepare Tx packet submission to FW.
* *
* @param p_hwfn * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle obtained from * @param connection_handle
* qed_ll2_require_connection * @param pkt - info regarding the tx packet
* @param num_of_bds a number of requested BD equals a number of * @param notify_fw - issue doorbell to fw for this packet
* fragments in Tx packet
* @param vlan VLAN to insert to packet (if insertion set)
* @param bd_flags
* @param l4_hdr_offset_w L4 Header Offset from start of packet
* (in words). This is needed if both l4_csum
* and ipv6_ext are set
* @param e_tx_dest indicates if the packet is to be transmitted via
* loopback or to the network
* @param first_frag
* @param first_frag_len
* @param cookie
*
* @param notify_fw
* *
* @return 0 on success, failure otherwise * @return 0 on success, failure otherwise
*/ */
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle, u8 connection_handle,
u8 num_of_bds, struct qed_ll2_tx_pkt_info *pkt,
u16 vlan, bool notify_fw);
u8 bd_flags,
u16 l4_hdr_offset_w,
enum qed_ll2_tx_dest e_tx_dest,
enum qed_ll2_roce_flavor_type qed_roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len, void *cookie, u8 notify_fw);
/** /**
* @brief qed_ll2_release_connection - releases resources * @brief qed_ll2_release_connection - releases resources
* allocated for LL2 connection * allocated for LL2 connection
* *
* @param p_hwfn * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle obtained from * @param connection_handle LL2 connection's handle obtained from
* qed_ll2_require_connection * qed_ll2_require_connection
*/ */
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); void qed_ll2_release_connection(void *cxt, u8 connection_handle);
/** /**
* @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
* Tx BD of BDs requested by * Tx BD of BDs requested by
* qed_ll2_prepare_tx_packet * qed_ll2_prepare_tx_packet
* *
* @param p_hwfn * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle * @param connection_handle LL2 connection's handle
* obtained from * obtained from
* qed_ll2_require_connection * qed_ll2_require_connection
...@@ -270,7 +211,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); ...@@ -270,7 +211,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
* *
* @return 0 on success, failure otherwise * @return 0 on success, failure otherwise
*/ */
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle, u8 connection_handle,
dma_addr_t addr, u16 nbytes); dma_addr_t addr, u16 nbytes);
...@@ -278,27 +219,27 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, ...@@ -278,27 +219,27 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
* @brief qed_ll2_terminate_connection - stops Tx/Rx queues * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
* *
* *
* @param p_hwfn * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle * @param connection_handle LL2 connection's handle
* obtained from * obtained from
* qed_ll2_require_connection * qed_ll2_require_connection
* *
* @return 0 on success, failure otherwise * @return 0 on success, failure otherwise
*/ */
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
/** /**
* @brief qed_ll2_get_stats - get LL2 queue's statistics * @brief qed_ll2_get_stats - get LL2 queue's statistics
* *
* *
* @param p_hwfn * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle obtained from * @param connection_handle LL2 connection's handle obtained from
* qed_ll2_require_connection * qed_ll2_require_connection
* @param p_stats * @param p_stats
* *
* @return 0 on success, failure otherwise * @return 0 on success, failure otherwise
*/ */
int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats); u8 connection_handle, struct qed_ll2_stats *p_stats);
/** /**
......
...@@ -35,7 +35,6 @@ ...@@ -35,7 +35,6 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h> #include <linux/if_ether.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/io.h> #include <linux/io.h>
...@@ -65,6 +64,7 @@ ...@@ -65,6 +64,7 @@
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_roce.h" #include "qed_roce.h"
#include "qed_ll2.h" #include "qed_ll2.h"
#include <linux/qed/qed_ll2_if.h>
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
...@@ -2709,301 +2709,35 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) ...@@ -2709,301 +2709,35 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock); spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
} }
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet)
{
struct qed_roce_ll2_packet *packet = cookie;
struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
}
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet)
{
qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
cookie, first_frag_addr,
b_last_fragment, b_last_packet);
}
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet)
{
struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
struct qed_roce_ll2_rx_params params;
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_roce_ll2_packet pkt;
DP_VERBOSE(cdev,
QED_MSG_LL2,
"roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
(void *)(uintptr_t)rx_buf_addr,
data_length, data_length_error);
memset(&pkt, 0, sizeof(pkt));
pkt.n_seg = 1;
pkt.payload[0].baddr = rx_buf_addr;
pkt.payload[0].len = data_length;
memset(&params, 0, sizeof(params));
params.vlan_id = vlan;
*((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
*((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
if (data_length_error) {
DP_ERR(cdev,
"roce ll2 rx complete: data length error %d, length=%d\n",
data_length_error, data_length);
params.rc = -EINVAL;
}
roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
}
static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
u8 *old_mac_address, u8 *old_mac_address,
u8 *new_mac_address) u8 *new_mac_address)
{ {
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt; struct qed_ptt *p_ptt;
int rc = 0; int rc = 0;
if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) { p_ptt = qed_ptt_acquire(p_hwfn);
DP_ERR(cdev,
"qed roce mac filter failed - roce_info/ll2 NULL\n");
return -EINVAL;
}
p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (!p_ptt) { if (!p_ptt) {
DP_ERR(cdev, DP_ERR(cdev,
"qed roce ll2 mac filter set: failed to acquire PTT\n"); "qed roce ll2 mac filter set: failed to acquire PTT\n");
return -EINVAL; return -EINVAL;
} }
mutex_lock(&hwfn->ll2->lock);
if (old_mac_address) if (old_mac_address)
qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
old_mac_address);
if (new_mac_address) if (new_mac_address)
rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
new_mac_address);
mutex_unlock(&hwfn->ll2->lock);
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
if (rc)
DP_ERR(cdev,
"qed roce ll2 mac filter set: failed to add mac filter\n");
return rc; qed_ptt_release(p_hwfn, p_ptt);
}
static int qed_roce_ll2_start(struct qed_dev *cdev,
struct qed_roce_ll2_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2;
struct qed_ll2_conn ll2_params;
int rc;
if (!params) {
DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
return -EINVAL;
}
if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
DP_ERR(cdev,
"qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
params->cbs.tx_cb, params->cbs.rx_cb);
return -EINVAL;
}
if (!is_valid_ether_addr(params->mac_address)) {
DP_ERR(cdev,
"qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
params->mac_address);
return -EINVAL;
}
/* Initialize */
roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
if (!roce_ll2) {
DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
return -ENOMEM;
}
roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
roce_ll2->cbs = params->cbs;
roce_ll2->cb_cookie = params->cb_cookie;
mutex_init(&roce_ll2->lock);
memset(&ll2_params, 0, sizeof(ll2_params));
ll2_params.conn_type = QED_LL2_TYPE_ROCE;
ll2_params.mtu = params->mtu;
ll2_params.rx_drop_ttl0_flg = true;
ll2_params.rx_vlan_removal_en = false;
ll2_params.tx_dest = CORE_TX_DEST_NW;
ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
ll2_params.gsi_enable = true;
rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
params->max_rx_buffers,
params->max_tx_buffers,
&roce_ll2->handle);
if (rc) {
DP_ERR(cdev,
"qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
rc);
goto err;
}
rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
roce_ll2->handle);
if (rc) {
DP_ERR(cdev,
"qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
rc);
goto err1;
}
hwfn->ll2 = roce_ll2;
rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
if (rc) {
hwfn->ll2 = NULL;
goto err2;
}
ether_addr_copy(roce_ll2->mac_address, params->mac_address);
return 0;
err2:
qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err1:
qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err:
kfree(roce_ll2);
return rc;
}
static int qed_roce_ll2_stop(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
}
/* remove LL2 MAC address filter */
rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
eth_zero_addr(roce_ll2->mac_address);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
roce_ll2->handle);
if (rc) if (rc)
DP_ERR(cdev, DP_ERR(cdev,
"qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n", "qed roce ll2 mac filter set: failed to add MAC filter\n");
rc);
qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
kfree(roce_ll2);
return rc; return rc;
} }
static int qed_roce_ll2_tx(struct qed_dev *cdev,
struct qed_roce_ll2_packet *pkt,
struct qed_roce_ll2_tx_params *params)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
enum qed_ll2_roce_flavor_type qed_roce_flavor;
u8 flags = 0;
int rc;
int i;
if (!pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
return -EINVAL;
}
qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
: QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
1 + pkt->n_seg, 0, flags, 0,
QED_LL2_TX_DEST_NW,
qed_roce_flavor, pkt->header.baddr,
pkt->header.len, pkt, 1);
if (rc) {
DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
return QED_ROCE_TX_HEAD_FAILURE;
}
/* Tx payload */
for (i = 0; i < pkt->n_seg; i++) {
rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
roce_ll2->handle,
pkt->payload[i].baddr,
pkt->payload[i].len);
if (rc) {
/* If failed not much to do here, partial packet has
* been posted * we can't free memory, will need to wait
* for completion
*/
DP_ERR(cdev,
"roce ll2 tx: payload failed (rc=%d)\n", rc);
return QED_ROCE_TX_FRAG_FAILURE;
}
}
return 0;
}
static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
struct qed_roce_ll2_buffer *buf,
u64 cookie, u8 notify_fw)
{
return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
QED_LEADING_HWFN(cdev)->ll2->handle,
buf->baddr, buf->len,
(void *)(uintptr_t)cookie, notify_fw);
}
static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
roce_ll2->handle, stats);
}
static const struct qed_rdma_ops qed_rdma_ops_pass = { static const struct qed_rdma_ops qed_rdma_ops_pass = {
.common = &qed_common_ops_pass, .common = &qed_common_ops_pass,
.fill_dev_info = &qed_fill_rdma_dev_info, .fill_dev_info = &qed_fill_rdma_dev_info,
...@@ -3031,12 +2765,15 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { ...@@ -3031,12 +2765,15 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.rdma_free_tid = &qed_rdma_free_tid, .rdma_free_tid = &qed_rdma_free_tid,
.rdma_register_tid = &qed_rdma_register_tid, .rdma_register_tid = &qed_rdma_register_tid,
.rdma_deregister_tid = &qed_rdma_deregister_tid, .rdma_deregister_tid = &qed_rdma_deregister_tid,
.roce_ll2_start = &qed_roce_ll2_start, .ll2_acquire_connection = &qed_ll2_acquire_connection,
.roce_ll2_stop = &qed_roce_ll2_stop, .ll2_establish_connection = &qed_ll2_establish_connection,
.roce_ll2_tx = &qed_roce_ll2_tx, .ll2_terminate_connection = &qed_ll2_terminate_connection,
.roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer, .ll2_release_connection = &qed_ll2_release_connection,
.roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
.roce_ll2_stats = &qed_roce_ll2_stats, .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
.ll2_get_stats = &qed_ll2_get_stats,
}; };
const struct qed_rdma_ops *qed_get_rdma_ops(void) const struct qed_rdma_ops *qed_get_rdma_ops(void)
......
...@@ -170,53 +170,10 @@ struct qed_rdma_qp { ...@@ -170,53 +170,10 @@ struct qed_rdma_qp {
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_roce_async_event(struct qed_hwfn *p_hwfn, void qed_roce_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, union rdma_eqe_data *rdma_data); u8 fw_event_code, union rdma_eqe_data *rdma_data);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet);
#else #else
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn, static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, u8 fw_event_code,
union rdma_eqe_data *rdma_data) {} union rdma_eqe_data *rdma_data) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet) {}
static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet) {}
static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo,
bool b_last_packet) {}
#endif #endif
#endif #endif
...@@ -43,6 +43,35 @@ ...@@ -43,6 +43,35 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
/* Protocol flavor that owns an LL2 (Light L2) connection.
 * NOTE(review): sentinel is named MAX_QED_LL2_RX_CONN_TYPE - presumably
 * mirrors a firmware RX connection-type enumeration; confirm against HSI.
 */
enum qed_ll2_conn_type {
	QED_LL2_TYPE_FCOE,
	QED_LL2_TYPE_ISCSI,
	QED_LL2_TYPE_TEST,
	QED_LL2_TYPE_ISCSI_OOO,
	QED_LL2_TYPE_RESERVED2,
	QED_LL2_TYPE_ROCE,
	QED_LL2_TYPE_RESERVED3,
	MAX_QED_LL2_RX_CONN_TYPE
};
/* RoCE transport flavor carried over an LL2 connection:
 * plain RoCE or RRoCE (routable RoCE).
 */
enum qed_ll2_roce_flavor_type {
	QED_LL2_ROCE,
	QED_LL2_RROCE,
	MAX_QED_LL2_ROCE_FLAVOR_TYPE
};
/* Destination of a transmitted LL2 packet. */
enum qed_ll2_tx_dest {
	QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
	QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
	QED_LL2_TX_DEST_MAX
};
/* Policy applied when an RX error is detected; used for the
 * ai_err_packet_too_big and ai_err_no_buf fields of
 * struct qed_ll2_acquire_data_inputs.
 */
enum qed_ll2_error_handle {
	QED_LL2_DROP_PACKET,	/* drop the offending packet */
	QED_LL2_DO_NOTHING,	/* ignore the error */
	QED_LL2_ASSERT,		/* treat the error as fatal */
};
struct qed_ll2_stats { struct qed_ll2_stats {
u64 gsi_invalid_hdr; u64 gsi_invalid_hdr;
u64 gsi_invalid_pkt_length; u64 gsi_invalid_pkt_length;
...@@ -67,6 +96,105 @@ struct qed_ll2_stats { ...@@ -67,6 +96,105 @@ struct qed_ll2_stats {
u64 sent_bcast_pkts; u64 sent_bcast_pkts;
}; };
/* Completion information for a received LL2 packet, passed to the
 * rx_comp_cb callback registered via struct qed_ll2_cbs.
 */
struct qed_ll2_comp_rx_data {
	void *cookie;		/* per-buffer cookie supplied at RX-post time */
	dma_addr_t rx_buf_addr;	/* DMA address of the completed buffer */
	u16 parse_flags;
	u16 vlan;
	bool b_last_packet;	/* last buffer of a multi-buffer packet */
	u8 connection_handle;	/* LL2 connection this completion belongs to */
	union {
		u16 packet_length;	/* regular L2 completion */
		u16 data_length;	/* GSI completion */
	} length;
	u32 opaque_data_0;
	u32 opaque_data_1;
	/* GSI only */
	u32 gid_dst[4];
	u16 qp_id;
	union {
		u8 placement_offset;	/* regular L2 completion */
		u8 data_length_error;	/* GSI completion */
	} u;
};
/* RX completion: a posted buffer finished receiving a packet;
 * @data describes the completed buffer.
 */
typedef
void (*qed_ll2_complete_rx_packet_cb)(void *cxt,
				      struct qed_ll2_comp_rx_data *data);

/* RX release: a posted buffer is handed back without completing
 * (NOTE(review): presumably on connection teardown - confirm in qed_ll2.c).
 */
typedef
void (*qed_ll2_release_rx_packet_cb)(void *cxt,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t rx_buf_addr,
				     bool b_last_packet);

/* TX completion: a previously prepared TX packet was processed;
 * @cookie is the value given to ll2_prepare_tx_packet.
 */
typedef
void (*qed_ll2_complete_tx_packet_cb)(void *cxt,
				      u8 connection_handle,
				      void *cookie,
				      dma_addr_t first_frag_addr,
				      bool b_last_fragment,
				      bool b_last_packet);

/* TX release: a pending TX packet is handed back without completing. */
typedef
void (*qed_ll2_release_tx_packet_cb)(void *cxt,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet);
/* Callback set a client registers when acquiring an LL2 connection.
 * NOTE(review): @cookie appears to be passed back as the 'cxt' argument
 * of each callback - confirm in qed_ll2.c.
 */
struct qed_ll2_cbs {
	qed_ll2_complete_rx_packet_cb rx_comp_cb;
	qed_ll2_release_rx_packet_cb rx_release_cb;
	qed_ll2_complete_tx_packet_cb tx_comp_cb;
	qed_ll2_release_tx_packet_cb tx_release_cb;
	void *cookie;
};
/* Client-supplied parameters describing the LL2 connection to acquire. */
struct qed_ll2_acquire_data_inputs {
	enum qed_ll2_conn_type conn_type;	/* owning protocol flavor */
	u16 mtu;				/* maximum frame size */
	u16 rx_num_desc;			/* RX descriptor-ring size */
	u16 rx_num_ooo_buffers;			/* out-of-order buffers (OOO connections) */
	u8 rx_drop_ttl0_flg;			/* drop RX packets with TTL 0 */
	u8 rx_vlan_removal_en;			/* strip VLAN tag on RX */
	u16 tx_num_desc;			/* TX descriptor-ring size */
	u8 tx_max_bds_per_packet;		/* max buffer descriptors per TX packet */
	u8 tx_tc;				/* TX traffic class */
	enum qed_ll2_tx_dest tx_dest;		/* default TX destination */
	enum qed_ll2_error_handle ai_err_packet_too_big; /* oversized-packet policy */
	enum qed_ll2_error_handle ai_err_no_buf;	/* no-RX-buffer policy */
	u8 gsi_enable;				/* non-zero for GSI connections */
};
/* Aggregate argument for ll2_acquire_connection: connection parameters,
 * client callbacks, and the location to store the resulting handle.
 */
struct qed_ll2_acquire_data {
	struct qed_ll2_acquire_data_inputs input;
	const struct qed_ll2_cbs *cbs;

	/* Output container for LL2 connection's handle */
	u8 *p_connection_handle;
};
/* Per-packet parameters for ll2_prepare_tx_packet(). */
struct qed_ll2_tx_pkt_info {
	void *cookie;			/* returned in the TX completion/release callbacks */
	dma_addr_t first_frag;		/* DMA address of the first fragment */
	enum qed_ll2_tx_dest tx_dest;	/* network or loopback */
	enum qed_ll2_roce_flavor_type qed_roce_flavor;
	u16 vlan;
	u16 l4_hdr_offset_w;	/* from start of packet */
	u16 first_frag_len;
	u8 num_of_bds;		/* total fragment count; extra fragments are added
				 * via ll2_set_fragment_of_tx_packet()
				 */
	u8 bd_flags;
	bool enable_ip_cksum;	/* request IP checksum offload */
	bool enable_l4_cksum;	/* request L4 checksum offload */
	bool calc_ip_len;
};
#define QED_LL2_UNUSED_HANDLE (0xff) #define QED_LL2_UNUSED_HANDLE (0xff)
struct qed_ll2_cb_ops { struct qed_ll2_cb_ops {
......
...@@ -34,8 +34,6 @@ ...@@ -34,8 +34,6 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h> #include <linux/qed/qed_ll2_if.h>
...@@ -491,42 +489,6 @@ struct qed_roce_ll2_packet { ...@@ -491,42 +489,6 @@ struct qed_roce_ll2_packet {
enum qed_roce_ll2_tx_dest tx_dest; enum qed_roce_ll2_tx_dest tx_dest;
}; };
/* Legacy TX parameters for the old roce_ll2_tx API; currently carries
 * no information (placeholder only).
 */
struct qed_roce_ll2_tx_params {
	int reserved;
};
/* Legacy per-packet RX metadata handed to the old rx_cb callback. */
struct qed_roce_ll2_rx_params {
	u16 vlan_id;		/* VLAN tag of the received packet */
	u8 smac[ETH_ALEN];	/* source MAC address */
	int rc;			/* receive status for this packet */
};
/* Legacy TX/RX completion callbacks for the old RoCE LL2 API. */
struct qed_roce_ll2_cbs {
	/* TX completion for @pkt */
	void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);

	/* RX completion for @pkt, with per-packet metadata in @params */
	void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
		      struct qed_roce_ll2_rx_params *params);
};
/* Legacy start-time configuration for the old roce_ll2_start API. */
struct qed_roce_ll2_params {
	u16 max_rx_buffers;		/* RX buffer count */
	u16 max_tx_buffers;		/* TX buffer count */
	u16 mtu;			/* maximum frame size */
	u8 mac_address[ETH_ALEN];	/* MAC filter to install */
	struct qed_roce_ll2_cbs cbs;	/* client completion callbacks */
	void *cb_cookie;		/* opaque argument for the callbacks */
};
/* Legacy per-device RoCE LL2 connection state. */
struct qed_roce_ll2_info {
	u8 handle;			/* LL2 connection handle */
	struct qed_roce_ll2_cbs cbs;	/* client completion callbacks */
	u8 mac_address[ETH_ALEN];	/* currently installed MAC filter */
	void *cb_cookie;		/* opaque argument for the callbacks */

	/* Lock to protect ll2 */
	struct mutex lock;
};
enum qed_rdma_type { enum qed_rdma_type {
QED_RDMA_TYPE_ROCE, QED_RDMA_TYPE_ROCE,
}; };
...@@ -579,26 +541,40 @@ struct qed_rdma_ops { ...@@ -579,26 +541,40 @@ struct qed_rdma_ops {
int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *oparams); struct qed_rdma_query_qp_out_params *oparams);
int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
int int
(*rdma_register_tid)(void *rdma_cxt, (*rdma_register_tid)(void *rdma_cxt,
struct qed_rdma_register_tid_in_params *iparams); struct qed_rdma_register_tid_in_params *iparams);
int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
void (*rdma_free_tid)(void *rdma_cxt, u32 itid); void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
int (*roce_ll2_start)(struct qed_dev *cdev,
struct qed_roce_ll2_params *params); int (*ll2_acquire_connection)(void *rdma_cxt,
int (*roce_ll2_stop)(struct qed_dev *cdev); struct qed_ll2_acquire_data *data);
int (*roce_ll2_tx)(struct qed_dev *cdev,
struct qed_roce_ll2_packet *packet, int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
struct qed_roce_ll2_tx_params *params); int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev, void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);
struct qed_roce_ll2_buffer *buf,
u64 cookie, u8 notify_fw); int (*ll2_prepare_tx_packet)(void *rdma_cxt,
int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev, u8 connection_handle,
u8 *old_mac_address, struct qed_ll2_tx_pkt_info *pkt,
u8 *new_mac_address); bool notify_fw);
int (*roce_ll2_stats)(struct qed_dev *cdev,
struct qed_ll2_stats *stats); int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
u8 connection_handle,
dma_addr_t addr,
u16 nbytes);
int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
dma_addr_t addr, u16 buf_len, void *cookie,
u8 notify_fw);
int (*ll2_get_stats)(void *rdma_cxt,
u8 connection_handle,
struct qed_ll2_stats *p_stats);
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
u8 *old_mac_address, u8 *new_mac_address);
}; };
const struct qed_rdma_ops *qed_get_rdma_ops(void); const struct qed_rdma_ops *qed_get_rdma_ops(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment