Commit 8ac718cc authored by David S. Miller

Merge branch 'bnxt_en-fixes'

Michael Chan says:

====================
bnxt_en: Bug fixes

This series fixes a devlink bug and several XDP-related bugs.  The
devlink bug causes a kernel crash on VF devices.  The XDP patches fix
and clean up the RX XDP path, and re-enable the header-data split
feature that was disabled by mistake when XDP multi-buffer support was
added.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3ec3ebec a056ebcc

@@ -991,8 +991,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 	dma_addr -= bp->rx_dma_offset;
 	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
 			     DMA_ATTR_WEAK_ORDERING);
-	skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
-			bp->rx_dma_offset);
+	skb = build_skb(page_address(page), PAGE_SIZE);
 	if (!skb) {
 		__free_page(page);
 		return NULL;
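
A note on the build_skb() change above: the second argument is the true
size of the buffer, and build_skb() carves struct skb_shared_info out of
its tail. The layout comment added to bnxt.h below shows skb_shared_info
at the tail of the full first page in XDP multi-buffer mode, so the skb
must be told the buffer spans the whole page. A minimal userspace sketch
of that layout math, with illustrative stand-in constants (real sizes
are arch- and config-dependent):

    /* Stand-in constants; sizeof(struct skb_shared_info) and its
     * alignment differ by architecture, so these are illustrative only.
     */
    #include <stdio.h>

    #define PAGE_SZ      4096u
    #define SHINFO_SIZE   320u  /* ~SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

    int main(void)
    {
        /* build_skb(addr, PAGE_SZ) puts skb_shared_info here: */
        unsigned int shinfo_off = PAGE_SZ - SHINFO_SIZE;

        /* Passing anything smaller than the full page would make the
         * skb look for skb_shared_info at a lower offset than where
         * the XDP multi-buffer path attached the frags.
         */
        printf("skb_shared_info at page offset %u\n", shinfo_off);
        return 0;
    }
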
@@ -1925,7 +1924,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	dma_addr = rx_buf->mapping;
 
 	if (bnxt_xdp_attached(bp, rxr)) {
-		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
 		if (agg_bufs) {
 			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
 							     cp_cons, agg_bufs,
@@ -1940,7 +1939,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	}
 
 	if (xdp_active) {
-		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
 			rc = 1;
 			goto next_rx;
 		}
@@ -3969,8 +3968,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
 	bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
-		rx_space = BNXT_PAGE_MODE_BUF_SIZE;
-		rx_size = BNXT_MAX_PAGE_MODE_MTU;
+		rx_space = PAGE_SIZE;
+		rx_size = PAGE_SIZE -
+			  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	} else {
 		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
 		rx_space = rx_size + NET_SKB_PAD +
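
For a feel of the new bnxt_set_ring_params() math: a rough userspace
sketch, assuming a 4 KiB page, the XDP core's 256-byte
XDP_PACKET_HEADROOM, a 64-byte NET_SKB_PAD, and roughly 320 bytes for
the aligned skb_shared_info (all illustrative; real values depend on
architecture and config):

    /* Stand-ins for the kernel macros used by bnxt_set_ring_params();
     * values are illustrative, not normative.
     */
    #include <stdio.h>

    #define PAGE_SZ              4096u
    #define XDP_PACKET_HEADROOM   256u
    #define NET_SKB_PAD            64u
    #define SHINFO_ALIGNED        320u  /* ~SKB_DATA_ALIGN(sizeof(skb_shared_info)) */

    #define ALIGN8(x)  (((x) + 7u) & ~7u)
    #define MAX(a, b)  ((a) > (b) ? (a) : (b))

    int main(void)
    {
        unsigned int rx_space = PAGE_SZ;
        unsigned int rx_size  = PAGE_SZ -
                                ALIGN8(MAX(NET_SKB_PAD, XDP_PACKET_HEADROOM)) -
                                SHINFO_ALIGNED;

        /* 4096 - 256 - 320 = 3520 bytes of packet data fit in the
         * first page; anything longer spills into aggregation buffers.
         */
        printf("rx_space=%u rx_size=%u\n", rx_space, rx_size);
        return 0;
    }
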
@@ -5398,15 +5399,16 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
 	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
 
-	if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+	if (BNXT_RX_PAGE_MODE(bp)) {
+		req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
+	} else {
 		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
 					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
 		req->enables |=
 			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+		req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+		req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
 	}
-	/* thresholds not implemented in firmware yet */
-	req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
-	req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
 	return hwrm_req_send(bp, req);
 }
@@ -13591,7 +13593,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENOMEM;
 
 	bp = netdev_priv(dev);
-	SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
 	bp->board_idx = ent->driver_data;
 	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
 	bnxt_set_max_func_irqs(bp, max_irqs);
@@ -13599,6 +13600,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (bnxt_vf_pciid(bp->board_idx))
 		bp->flags |= BNXT_FLAG_VF;
 
+	/* No devlink port registration in case of a VF */
+	if (BNXT_PF(bp))
+		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
+
 	if (pdev->msix_cap)
 		bp->flags |= BNXT_FLAG_MSIX_CAP;
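
The two bnxt_init_one() hunks are the VF crash fix from the cover
letter: the devlink port was attached to the netdev before BNXT_FLAG_VF
was known, and VFs never initialize bp->dl_port. The corrected ordering
classifies the function first and registers the port only for PFs. A
tiny userspace analogue of the pattern (all names hypothetical, not the
real devlink API):

    /* Hypothetical stand-ins illustrating "classify first, register
     * optional sub-objects second"; not the real devlink interface.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct dl_port { const char *name; };

    struct dev {
        bool is_vf;               /* analogue of BNXT_FLAG_VF */
        struct dl_port *dl_port;  /* initialized for PFs only */
    };

    static void probe(struct dev *d, bool vf_pci_id, struct dl_port *port)
    {
        d->is_vf = vf_pci_id;     /* decide the device type first ... */
        if (!d->is_vf)
            d->dl_port = port;    /* ... then register PF-only objects */
    }

    int main(void)
    {
        static struct dl_port port = { "p0" };
        struct dev vf = { 0 }, pf = { 0 };

        probe(&vf, true, &port);  /* registration correctly skipped */
        probe(&pf, false, &port);
        printf("vf: %s, pf: %s\n",
               vf.dl_port ? vf.dl_port->name : "(none)",
               pf.dl_port ? pf.dl_port->name : "(none)");
        return 0;
    }
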
...
@@ -591,12 +591,20 @@ struct nqe_cn {
 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
 
 #define BNXT_MAX_MTU		9500
 
-#define BNXT_PAGE_MODE_BUF_SIZE \
+/* First RX buffer page in XDP multi-buf mode
+ *
+ * +--------------------------------------------------------------+
+ * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size |  skb_shared_info |
+ * | (bp->rx_dma_offset) |                     |                  |
+ * +--------------------------------------------------------------+
+ */
+#define BNXT_MAX_PAGE_MODE_MTU_SBUF \
 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
 	 XDP_PACKET_HEADROOM)
 #define BNXT_MAX_PAGE_MODE_MTU	\
-	BNXT_PAGE_MODE_BUF_SIZE - \
-	SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))
+	(BNXT_MAX_PAGE_MODE_MTU_SBUF - \
+	 SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
 
 #define BNXT_MIN_PKT_SIZE	52
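
To see what the reworked macros evaluate to, a small sketch with
assumed constants (4 KiB page, 18-byte VLAN_ETH_HLEN, NET_IP_ALIGN of
2, 256-byte XDP headroom, ~320-byte aligned skb_shared_info; all
arch-dependent in reality):

    /* Stand-in evaluation of the two macros; constants illustrative. */
    #include <stdio.h>

    #define PAGE_SZ              4096u
    #define VLAN_ETH_HLEN          18u  /* Ethernet header plus one VLAN tag */
    #define NET_IP_ALIGN            2u
    #define XDP_PACKET_HEADROOM   256u
    #define SHINFO_ALIGNED        320u  /* ~SKB_DATA_ALIGN(sizeof(skb_shared_info)) */

    #define MAX_PAGE_MODE_MTU_SBUF \
        (PAGE_SZ - VLAN_ETH_HLEN - NET_IP_ALIGN - XDP_PACKET_HEADROOM)
    #define MAX_PAGE_MODE_MTU \
        (MAX_PAGE_MODE_MTU_SBUF - SHINFO_ALIGNED)

    int main(void)
    {
        /* 4096 - 18 - 2 - 256 = 3820, and 3820 - 320 = 3500 here.  The
         * outer parentheses the patch adds are not cosmetic: without
         * them, 2 * MAX_PAGE_MODE_MTU would expand to
         * 2 * MAX_PAGE_MODE_MTU_SBUF - SHINFO_ALIGNED.
         */
        printf("MTU_SBUF=%u MTU=%u\n",
               (unsigned)MAX_PAGE_MODE_MTU_SBUF,
               (unsigned)MAX_PAGE_MODE_MTU);
        return 0;
    }
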
@@ -2134,7 +2142,6 @@ struct bnxt {
 #define BNXT_DUMP_CRASH		1
 
 	struct bpf_prog		*xdp_prog;
-	u8			xdp_has_frags;
 
 	struct bnxt_ptp_cfg	*ptp_cfg;
 	u8			ptp_all_rx_tstamp;
...
@@ -177,7 +177,7 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 }
 
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
-			u16 cons, u8 **data_ptr, unsigned int *len,
+			u16 cons, u8 *data_ptr, unsigned int len,
 			struct xdp_buff *xdp)
 {
 	struct bnxt_sw_rx_bd *rx_buf;
@@ -191,13 +191,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	offset = bp->rx_offset;
 	mapping = rx_buf->mapping - bp->rx_dma_offset;
-	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
-	if (bp->xdp_has_frags)
-		buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
 
 	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
-	xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
 }
 
 void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +219,8 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
  * false - packet should be passed to the stack.
  */
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
+		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+		 unsigned int *len, u8 *event)
 {
 	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
 	struct bnxt_tx_ring_info *txr;
@@ -255,8 +253,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	*event &= ~BNXT_RX_EVENT;
 
 	*len = xdp.data_end - xdp.data;
-	if (orig_data != xdp.data)
+	if (orig_data != xdp.data) {
 		offset = xdp.data - xdp.data_hard_start;
+		*data_ptr = xdp.data_hard_start + offset;
+	}
 
 	switch (act) {
 	case XDP_PASS:
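
The new u8 **data_ptr parameter closes a real gap: an XDP program may
move the packet start (e.g. via bpf_xdp_adjust_head()), and with a
single-level pointer the caller in bnxt_rx_pkt() would keep using the
stale head when building the skb. A minimal userspace illustration of
the write-back (names hypothetical):

    /* Why bnxt_rx_xdp() takes u8 **data_ptr: the callee must be able
     * to rewrite the caller's pointer after the XDP program adjusts
     * the head.  All names here are stand-ins for illustration.
     */
    #include <stdio.h>

    /* Pretend an XDP program pulled 14 bytes of headroom into the packet. */
    static void run_xdp_prog(unsigned char **data, unsigned int *len)
    {
        *data -= 14;
        *len  += 14;
    }

    static void rx_xdp(unsigned char **data_ptr, unsigned int *len)
    {
        unsigned char *data = *data_ptr;

        run_xdp_prog(&data, len);
        if (data != *data_ptr)
            *data_ptr = data;   /* propagate the new head to the caller */
    }

    int main(void)
    {
        unsigned char buf[256];
        unsigned char *data_ptr = buf + 64;  /* packet starts after headroom */
        unsigned int len = 100;

        rx_xdp(&data_ptr, &len);
        printf("head moved to offset %td, len=%u\n", data_ptr - buf, len);
        return 0;
    }
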
@@ -401,10 +401,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
 		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
 		return -EOPNOTSUPP;
 	}
-	if (prog) {
+	if (prog)
 		tx_xdp = bp->rx_nr_rings;
-		bp->xdp_has_frags = prog->aux->xdp_has_frags;
-	}
 
 	tc = netdev_get_num_tc(dev);
 	if (!tc)
...
@@ -18,8 +18,8 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 				   struct xdp_buff *xdp);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct xdp_buff xdp, struct page *page, unsigned int *len,
-		 u8 *event);
+		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+		 unsigned int *len, u8 *event);
 int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 		  struct xdp_frame **frames, u32 flags);
@@ -27,7 +27,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
 
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
-			u16 cons, u8 **data_ptr, unsigned int *len,
+			u16 cons, u8 *data_ptr, unsigned int len,
 			struct xdp_buff *xdp);
 void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
 			      struct xdp_buff *xdp);
...