Commit 21453285 authored by Long Li, committed by Jason Gunthorpe

RDMA/mana_ib: Use v2 version of cfg_rx_steer_req to enable RX coalescing

With RX coalescing, one CQE entry can be used to indicate multiple packets
on the receive queue. This saves processing time and PCI bandwidth over
the CQ.
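
To make the effect concrete, here is a hypothetical poll loop; every name in it (rx_cq, rx_cqe, wqe_consumed, next_cqe, complete_one_rx_packet) is invented for illustration and is not the mana_ib CQE format:

/* Illustrative sketch only: with coalescing, one CQE can report several
 * completed RX WQEs, so fewer CQEs cross the PCI bus and fewer entries
 * are parsed per received packet.
 */
#include <stdint.h>

struct rx_cq;					/* hypothetical queue handle */
struct rx_cqe { uint32_t wqe_consumed; };	/* hypothetical CQE layout */

struct rx_cqe *next_cqe(struct rx_cq *cq);	/* hypothetical helpers */
void complete_one_rx_packet(struct rx_cq *cq, uint32_t idx);

static void poll_rx_cq_sketch(struct rx_cq *cq)
{
	struct rx_cqe *cqe;

	while ((cqe = next_cqe(cq)) != NULL) {
		uint32_t i;

		/* Without coalescing, wqe_consumed is always 1; with it,
		 * a single CQE completes a batch of receive WQEs.
		 */
		for (i = 0; i < cqe->wqe_consumed; i++)
			complete_one_rx_packet(cq, i);
	}
}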

The MANA Ethernet driver is also moved to the v2 version of the protocol. It
does not use RX coalescing, so its behavior is unchanged.

Link: https://lore.kernel.org/r/1684045095-31228-1-git-send-email-longli@linuxonhyperv.com
Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 8c1ee346
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -13,7 +13,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 				      u8 *rx_hash_key)
 {
 	struct mana_port_context *mpc = netdev_priv(ndev);
-	struct mana_cfg_rx_steer_req *req = NULL;
+	struct mana_cfg_rx_steer_req_v2 *req;
 	struct mana_cfg_rx_steer_resp resp = {};
 	mana_handle_t *req_indir_tab;
 	struct gdma_context *gc;
@@ -33,6 +33,8 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
 			     sizeof(resp));
 
+	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
+
 	req->vport = mpc->port_handle;
 	req->rx_enable = 1;
 	req->update_default_rxobj = 1;
@@ -46,6 +48,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
 	req->indir_tab_offset = sizeof(*req);
 	req->update_indir_tab = true;
+	req->cqe_coalescing_enable = 1;
 
 	req_indir_tab = (mana_handle_t *)(req + 1);
 	/* The ind table passed to the hardware must have
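
Putting the hunks above together: the RDMA path builds one buffer holding the v2 request immediately followed by the indirection table, stamps the header as GDMA_MESSAGE_V2, and opts in to coalescing. A condensed sketch of that flow (pieced together from the hunks above; allocation and error handling are simplified, and this is not the verbatim kernel function):

	/* One allocation: fixed-size v2 request + indirection table payload. */
	req_buf_size = sizeof(*req) +
		       sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	/* Advertise the v2 wire format so the new tail fields are honored. */
	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
	req->indir_tab_offset = sizeof(*req);	/* table sits right after req */
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;		/* the RDMA path opts in */

	/* The indirection table occupies the bytes just past the struct. */
	req_indir_tab = (mana_handle_t *)(req + 1);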
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -972,7 +972,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
 				   bool update_tab)
 {
 	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
-	struct mana_cfg_rx_steer_req *req = NULL;
+	struct mana_cfg_rx_steer_req_v2 *req;
 	struct mana_cfg_rx_steer_resp resp = {};
 	struct net_device *ndev = apc->ndev;
 	mana_handle_t *req_indir_tab;
@@ -987,6 +987,8 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
 			     sizeof(resp));
 
+	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
+
 	req->vport = apc->port_handle;
 	req->num_indir_entries = num_entries;
 	req->indir_tab_offset = sizeof(*req);
@@ -996,6 +998,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
 	req->update_hashkey = update_key;
 	req->update_indir_tab = update_tab;
 	req->default_rxobj = apc->default_rxobj;
+	req->cqe_coalescing_enable = 0;
 
 	if (update_key)
 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -581,7 +581,7 @@ struct mana_fence_rq_resp {
 }; /* HW DATA */
 
 /* Configure vPort Rx Steering */
-struct mana_cfg_rx_steer_req {
+struct mana_cfg_rx_steer_req_v2 {
 	struct gdma_req_hdr hdr;
 	mana_handle_t vport;
 	u16 num_indir_entries;
@@ -594,6 +594,8 @@ struct mana_cfg_rx_steer_req {
 	u8 reserved;
 	mana_handle_t default_rxobj;
 	u8 hashkey[MANA_HASH_KEY_SIZE];
+	u8 cqe_coalescing_enable;
+	u8 reserved2[7];
 }; /* HW DATA */
 
 struct mana_cfg_rx_steer_resp {
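
The new tail adds exactly eight bytes (one u8 flag plus u8 reserved2[7]), so sizeof(*req) stays a multiple of 8 and the mana_handle_t indirection table appended at (req + 1) keeps its natural alignment. A minimal userspace sketch that checks this invariant with stand-in types; the header stub and mid[12] filler sizes are assumptions standing in for struct gdma_req_hdr and the fields elided above, not the real definitions:

/* Layout sanity check; compiles standalone with any C11 compiler. */
#include <stdint.h>

typedef uint64_t mana_handle_t;
#define MANA_HASH_KEY_SIZE 40

/* Placeholder size: an assumption, not the real gdma.h layout. */
struct gdma_req_hdr_stub { uint8_t raw[40]; };

struct cfg_rx_steer_req_v2_sketch {
	struct gdma_req_hdr_stub hdr;
	mana_handle_t vport;
	uint16_t num_indir_entries;
	uint16_t indir_tab_offset;
	uint8_t mid[12];		/* stands in for the elided fields */
	mana_handle_t default_rxobj;
	uint8_t hashkey[MANA_HASH_KEY_SIZE];
	uint8_t cqe_coalescing_enable;	/* new in v2 */
	uint8_t reserved2[7];		/* pads the tail to 8 bytes */
};

/* The 1 + 7 new bytes equal sizeof(mana_handle_t), so appending the
 * indirection table at (req + 1) stays 8-byte aligned, as before v2.
 */
_Static_assert(sizeof(struct cfg_rx_steer_req_v2_sketch) %
	       sizeof(mana_handle_t) == 0,
	       "indirection table at (req + 1) must stay naturally aligned");

int main(void) { return 0; }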