Commit aa565497 authored by Long Li, committed by Leon Romanovsky

net: mana: Define max values for SGL entries

The number of maximum SGL entries should be computed from the maximum
WQE size for the intended queue type and the corresponding OOB data
size. This guarantees the hardware queue can successfully queue requests
up to the queue depth exposed to the upper layer.
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-9-git-send-email-longli@linuxonhyperv.com
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
parent fd325cd6
...@@ -189,7 +189,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -189,7 +189,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pkg.wqe_req.client_data_unit = 0; pkg.wqe_req.client_data_unit = 0;
pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
WARN_ON_ONCE(pkg.wqe_req.num_sge > 30); WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
pkg.wqe_req.sgl = pkg.sgl_array; pkg.wqe_req.sgl = pkg.sgl_array;
......
...@@ -427,6 +427,13 @@ struct gdma_wqe { ...@@ -427,6 +427,13 @@ struct gdma_wqe {
#define MAX_TX_WQE_SIZE 512 #define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256 #define MAX_RX_WQE_SIZE 256
#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
sizeof(struct gdma_sge))
#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
struct gdma_cqe { struct gdma_cqe {
u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
......
...@@ -265,8 +265,6 @@ struct mana_cq { ...@@ -265,8 +265,6 @@ struct mana_cq {
int budget; int budget;
}; };
#define GDMA_MAX_RQE_SGES 15
struct mana_recv_buf_oob { struct mana_recv_buf_oob {
/* A valid GDMA work request representing the data buffer. */ /* A valid GDMA work request representing the data buffer. */
struct gdma_wqe_request wqe_req; struct gdma_wqe_request wqe_req;
...@@ -276,7 +274,7 @@ struct mana_recv_buf_oob { ...@@ -276,7 +274,7 @@ struct mana_recv_buf_oob {
/* SGL of the buffer going to be sent has part of the work request. */ /* SGL of the buffer going to be sent has part of the work request. */
u32 num_sge; u32 num_sge;
struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
/* Required to store the result of mana_gd_post_work_request. /* Required to store the result of mana_gd_post_work_request.
* gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment