Commit c61fcc09 authored by David S. Miller

Merge branch 'mana-jumbo-frames'

Haiyang Zhang says:

====================
net: mana: Add support for jumbo frame

This set adds support for jumbo frames,
with some optimizations for the RX path.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e473ea81 80f6215b
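
A note before the diff: the sizing rule the series is built around is that an RX buffer must fit optional XDP headroom, the Ethernet header plus MTU bytes of payload, and the trailing skb_shared_info; once that no longer fits in one page, XDP is refused and larger allocations are used instead. The standalone program below is a hedged paraphrase of that rule, mirroring the MANA_RXBUF_PAD / MANA_XDP_MTU_MAX macros added further down; the numeric stand-ins (4 KiB pages, 256-byte XDP headroom, skb_shared_info rounding to 320 bytes on a typical x86_64 build) and the helper shape are our assumptions, not the driver's code.

/* Userspace sketch of the RX buffer sizing this series introduces. */
#include <stdio.h>

#define PAGE_SIZE	    4096u
#define ETH_HLEN	    14u
#define XDP_PACKET_HEADROOM 256u  /* assumed value of the kernel constant */
#define SHINFO_SIZE	    320u  /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#define MANA_RX_DATA_ALIGN  64u

#define MANA_RXBUF_PAD	 (SHINFO_SIZE + ETH_HLEN)
#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)

#define ALIGN_UP(x, a)	 (((x) + (a) - 1) & ~((a) - 1))

/* For a given MTU: payload bytes the HW may write, the full per-buffer
 * allocation, and the headroom reserved in front of the frame for XDP.
 */
static void rxbuf_cfg(unsigned int mtu, unsigned int *datasize,
		      unsigned int *alloc_size, unsigned int *headroom)
{
	*headroom = (mtu > MANA_XDP_MTU_MAX) ? 0 : XDP_PACKET_HEADROOM;
	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
	*datasize = ALIGN_UP(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
}

int main(void)
{
	unsigned int mtus[] = { 1500, 3506, 9000 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int d, a, h;

		rxbuf_cfg(mtus[i], &d, &a, &h);
		printf("mtu %4u -> datasize %4u alloc %4u headroom %3u\n",
		       mtus[i], d, a, h);
	}
	return 0;
}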
drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -133,12 +133,6 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
 	return act;
 }
 
-static unsigned int mana_xdp_fraglen(unsigned int len)
-{
-	return SKB_DATA_ALIGN(len) +
-	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
 {
 	ASSERT_RTNL();
@@ -179,17 +173,18 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
 	struct bpf_prog *old_prog;
-	int buf_max;
+	struct gdma_context *gc;
+
+	gc = apc->ac->gdma_dev->gdma_context;
 
 	old_prog = mana_xdp_get(apc);
 
 	if (!old_prog && !prog)
 		return 0;
 
-	buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
-	if (prog && buf_max > PAGE_SIZE) {
-		netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
-			   ndev->mtu, buf_max);
+	if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
+		netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
+			   ndev->mtu, MANA_XDP_MTU_MAX);
 		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
 
 		return -EOPNOTSUPP;
@@ -206,6 +201,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
 	if (apc->port_is_up)
 		mana_chn_setxdp(apc, prog);
 
+	if (prog)
+		ndev->max_mtu = MANA_XDP_MTU_MAX;
+	else
+		ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+
 	return 0;
 }
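
For a sense of scale: with 4 KiB pages, a 256-byte XDP_PACKET_HEADROOM, and skb_shared_info rounding to 320 bytes on a typical x86_64 build (all assumed values), MANA_XDP_MTU_MAX = 4096 - (320 + 14) - 256 = 3506. Loading an XDP program therefore caps max_mtu at 3506, while unloading it restores the adapter-reported limit via gc->adapter_mtu.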
include/net/mana/gdma.h
@@ -145,6 +145,7 @@ struct gdma_general_req {
 }; /* HW DATA */
 
 #define GDMA_MESSAGE_V1 1
+#define GDMA_MESSAGE_V2 2
 
 struct gdma_general_resp {
 	struct gdma_resp_hdr hdr;
@@ -354,6 +355,9 @@ struct gdma_context {
 	struct gdma_resource msix_resource;
 	struct gdma_irq_context *irq_contexts;
 
+	/* L2 MTU */
+	u16 adapter_mtu;
+
 	/* This maps a CQ index to the queue structure. */
 	unsigned int max_num_cqs;
 	struct gdma_queue **cq_table;
include/net/mana/mana.h
@@ -36,10 +36,8 @@ enum TRI_STATE {
 #define COMP_ENTRY_SIZE 64
 
-#define ADAPTER_MTU_SIZE 1500
-#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
-
 #define RX_BUFFERS_PER_QUEUE 512
+#define MANA_RX_DATA_ALIGN 64
 
 #define MAX_SEND_BUFFERS_PER_QUEUE 256
@@ -282,7 +280,6 @@ struct mana_recv_buf_oob {
 	struct gdma_wqe_request wqe_req;
 
 	void *buf_va;
-	dma_addr_t buf_dma_addr;
 
 	/* SGL of the buffer going to be sent has part of the work request. */
 	u32 num_sge;
@@ -295,6 +292,11 @@ struct mana_recv_buf_oob {
 	struct gdma_posted_wqe_info wqe_inf;
 };
 
+#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
+			+ ETH_HLEN)
+
+#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
+
 struct mana_rxq {
 	struct gdma_queue *gdma_rq;
 	/* Cache the gdma receive queue id */
@@ -304,6 +306,8 @@ struct mana_rxq {
 	u32 rxq_idx;
 
 	u32 datasize;
+	u32 alloc_size;
+	u32 headroom;
 
 	mana_handle_t rxobj;
@@ -322,7 +326,7 @@ struct mana_rxq {
 	struct bpf_prog __rcu *bpf_prog;
 	struct xdp_rxq_info xdp_rxq;
-	struct page *xdp_save_page;
+	void *xdp_save_va; /* for reusing */
 	bool xdp_flush;
 	int xdp_rc; /* XDP redirect return code */
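
The xdp_save_va switch above is one of the RX-path optimizations the cover letter mentions: rather than tracking a struct page, the queue parks the raw buffer address of a frame XDP dropped and reuses it on the next refill, skipping an allocation. A minimal userspace sketch of that one-slot cache follows; all names and the malloc/free stand-ins are ours, not the driver's.

/* Userspace sketch of the xdp_save_va save-for-reuse idea. */
#include <stdio.h>
#include <stdlib.h>

struct rxq_sketch {
	void *xdp_save_va; /* parked buffer from a dropped frame, or NULL */
	size_t alloc_size;
};

static void *rx_get_buf(struct rxq_sketch *rxq)
{
	void *va = rxq->xdp_save_va;

	if (va) {			/* fast path: reuse the parked buffer */
		rxq->xdp_save_va = NULL;
		return va;
	}
	return malloc(rxq->alloc_size);	/* slow path: fresh allocation */
}

static void rx_drop_buf(struct rxq_sketch *rxq, void *va)
{
	if (!rxq->xdp_save_va)		/* park one dropped buffer for reuse */
		rxq->xdp_save_va = va;
	else
		free(va);
}

int main(void)
{
	struct rxq_sketch rxq = { .xdp_save_va = NULL, .alloc_size = 4096 };
	void *a = rx_get_buf(&rxq);	/* fresh allocation */

	rx_drop_buf(&rxq, a);		/* XDP_DROP: park it */
	void *b = rx_get_buf(&rxq);	/* handed back without a new malloc */
	printf("reused: %s\n", a == b ? "yes" : "no");
	free(b);
	return 0;
}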
@@ -387,6 +391,14 @@ struct mana_port_context {
 	/* This points to an array of num_queues of RQ pointers. */
 	struct mana_rxq **rxqs;
 
+	/* pre-allocated rx buffer array */
+	void **rxbufs_pre;
+	dma_addr_t *das_pre;
+	int rxbpre_total;
+	u32 rxbpre_datasize;
+	u32 rxbpre_alloc_size;
+	u32 rxbpre_headroom;
+
 	struct bpf_prog *bpf_prog;
 
 	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
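
These new arrays hold buffers allocated before an MTU change is committed, so the swap to new, larger buffers cannot fail halfway through queue re-creation. A hedged userspace sketch of that all-or-nothing pre-allocation is below; names, shapes, and the malloc stand-ins are ours.

/* Userspace sketch of the pre-allocation idea behind rxbufs_pre. */
#include <stdlib.h>

struct port_sketch {
	void **rxbufs_pre;  /* pre-allocated buffer pointers */
	int rxbpre_total;   /* how many are parked here */
};

static int pre_alloc_rxbufs(struct port_sketch *p, size_t alloc_size, int num)
{
	p->rxbufs_pre = calloc(num, sizeof(*p->rxbufs_pre));
	if (!p->rxbufs_pre)
		return -1;

	for (p->rxbpre_total = 0; p->rxbpre_total < num; p->rxbpre_total++) {
		void *va = malloc(alloc_size);

		if (!va)
			goto err;	/* fail the MTU change before any teardown */
		p->rxbufs_pre[p->rxbpre_total] = va;
	}
	return 0;

err:
	while (p->rxbpre_total--)
		free(p->rxbufs_pre[p->rxbpre_total]);
	free(p->rxbufs_pre);
	p->rxbufs_pre = NULL;
	p->rxbpre_total = 0;
	return -1;
}

int main(void)
{
	struct port_sketch p = { 0 };

	/* Park 512 buffers of 4 KiB before committing an MTU change. */
	return pre_alloc_rxbufs(&p, 4096, 512) ? 1 : 0;
}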
@@ -486,6 +498,11 @@ struct mana_query_device_cfg_resp {
 	u16 max_num_vports;
 	u16 reserved;
 	u32 max_num_eqs;
+
+	/* response v2: */
+	u16 adapter_mtu;
+	u16 reserved2;
+	u32 reserved3;
 }; /* HW DATA */
 
 /* Query vPort Configuration */
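Tying the pieces together: the GDMA_MESSAGE_V2 define, gc->adapter_mtu, and the v2 response fields above suggest a version-negotiated device-configuration query, with a fixed 1514-byte L2 frame (1500 MTU plus 14-byte Ethernet header) as the fallback for older firmware. A speculative userspace sketch of that negotiation follows; the struct shape, helper, and example values are our assumptions, not the driver's API.

/* Userspace sketch of picking the adapter MTU from a v1/v2 response. */
#include <stdint.h>
#include <stdio.h>

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define ETH_FRAME_LEN   1514
#define ETH_HLEN        14

struct cfg_resp_sketch {
	uint32_t msg_version; /* message version echoed by the device */
	uint16_t adapter_mtu; /* only meaningful when msg_version >= 2 */
};

static uint16_t pick_adapter_mtu(const struct cfg_resp_sketch *resp)
{
	if (resp->msg_version >= GDMA_MESSAGE_V2 && resp->adapter_mtu)
		return resp->adapter_mtu;	/* device-reported L2 MTU */
	return ETH_FRAME_LEN;			/* legacy fixed default */
}

int main(void)
{
	struct cfg_resp_sketch v1 = { .msg_version = GDMA_MESSAGE_V1 };
	struct cfg_resp_sketch v2 = { .msg_version = GDMA_MESSAGE_V2,
				      .adapter_mtu = 4074 /* illustrative */ };

	/* netdev max_mtu derives from the L2 MTU minus the Ethernet header,
	 * matching the mana_xdp_set() hunk earlier in this diff.
	 */
	printf("v1: max_mtu = %d\n", pick_adapter_mtu(&v1) - ETH_HLEN);
	printf("v2: max_mtu = %d\n", pick_adapter_mtu(&v2) - ETH_HLEN);
	return 0;
}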