Commit a0b4a80e authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-08-20 (ice)

This series contains updates to ice driver only.

Maciej fixes issues in the Rx data path on architectures with
PAGE_SIZE >= 8192, correcting page reuse and the calculations for
last offset and truesize.

Michal corrects the devlink port number assignment to use the PF ID
instead of the PCI function number, which can differ when the PF is
passed through to a VM.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: use internal pf id instead of function number
  ice: fix truesize operations for PAGE_SIZE >= 8192
  ice: fix ICE_LAST_OFFSET formula
  ice: fix page reuse when PAGE_SIZE is over 8k
====================

Link: https://patch.msgid.link/20240820215620.1245310-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 8baeef76 503ab6ee
@@ -337,7 +337,7 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
 		return -EIO;
 
 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
-	attrs.phys.port_number = pf->hw.bus.func;
+	attrs.phys.port_number = pf->hw.pf_id;
 
 	/* As FW supports only port split options for whole device,
 	 * set port split options only for first PF.
@@ -455,7 +455,7 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
 		return -EINVAL;
 
 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
-	attrs.pci_vf.pf = pf->hw.bus.func;
+	attrs.pci_vf.pf = pf->hw.pf_id;
 	attrs.pci_vf.vf = vf->vf_id;
 
 	ice_devlink_set_switch_id(pf, &attrs.switch_id);
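A toy illustration of why bus.func makes a poor port number (hypothetical struct and values, not driver code): the PCI function number is whatever the enumerating OS happens to see, while pf_id is the adapter's own stable index.

#include <stdio.h>

/* Hypothetical model: the same PF seen from the host and from a VM it
 * was passed through to. The PCI function number changes with the
 * enumeration context; the internal PF id does not.
 */
struct toy_pf {
	unsigned int pf_id;    /* internal index, stable */
	unsigned int bus_func; /* PCI function number, context-dependent */
};

int main(void)
{
	struct toy_pf host  = { .pf_id = 2, .bus_func = 2 };
	struct toy_pf guest = { .pf_id = 2, .bus_func = 0 }; /* re-enumerated */

	/* keying the devlink port number to pf_id keeps it consistent */
	printf("host:  port %u (bus.func %u)\n", host.pf_id, host.bus_func);
	printf("guest: port %u (bus.func %u)\n", guest.pf_id, guest.bus_func);
	return 0;
}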
...
@@ -512,6 +512,25 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
 	xsk_pool_fill_cb(ring->xsk_pool, &desc);
 }
 
+/**
+ * ice_get_frame_sz - calculate xdp_buff::frame_sz
+ * @rx_ring: the ring being configured
+ *
+ * Return frame size based on underlying PAGE_SIZE
+ */
+static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
+{
+	unsigned int frame_sz;
+
+#if (PAGE_SIZE >= 8192)
+	frame_sz = rx_ring->rx_buf_len;
+#else
+	frame_sz = ice_rx_pg_size(rx_ring) / 2;
+#endif
+
+	return frame_sz;
+}
+
 /**
  * ice_vsi_cfg_rxq - Configure an Rx queue
  * @ring: the ring being configured
@@ -576,7 +595,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 		}
 	}
 
-	xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+	xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
 	ring->xdp.data = NULL;
 	ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
 	err = ice_setup_rx_ctx(ring);
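For context, a minimal standalone sketch of the arithmetic ice_get_frame_sz() encodes; the 3072-byte buffer length and the two page sizes are assumed example values, not taken from this diff:

#include <stdio.h>

/* Toy restatement of ice_get_frame_sz(): on PAGE_SIZE < 8192 each page
 * is split into two equal buffers, so frame_sz is half a page; on bigger
 * pages the Rx buffer is a fixed-length slice, so frame_sz must track
 * the configured buffer length rather than half the (much larger) page.
 */
static unsigned int frame_sz(unsigned int page_size, unsigned int rx_buf_len)
{
	if (page_size >= 8192)
		return rx_buf_len;
	return page_size / 2;
}

int main(void)
{
	printf("4K pages:  %u\n", frame_sz(4096, 3072));  /* 2048, half page */
	printf("64K pages: %u\n", frame_sz(65536, 3072)); /* 3072, buffer length */
	return 0;
}

Because this value is now a per-ring constant set once at queue configuration, the per-packet recalculation in ice_clean_rx_irq() (removed below) becomes unnecessary.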
...
@@ -521,30 +521,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
 	return -ENOMEM;
 }
 
-/**
- * ice_rx_frame_truesize
- * @rx_ring: ptr to Rx ring
- * @size: size
- *
- * calculate the truesize with taking into the account PAGE_SIZE of
- * underlying arch
- */
-static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
-{
-	unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
-	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
-	truesize = rx_ring->rx_offset ?
-		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
-		SKB_DATA_ALIGN(size);
-#endif
-	return truesize;
-}
-
 /**
  * ice_run_xdp - Executes an XDP program on initialized xdp_buff
  * @rx_ring: Rx ring
@@ -837,16 +813,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
 	if (!dev_page_is_reusable(page))
 		return false;
 
-#if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
 	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
 		return false;
-#else
+#if (PAGE_SIZE >= 8192)
 #define ICE_LAST_OFFSET \
-	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
 	if (rx_buf->page_offset > ICE_LAST_OFFSET)
 		return false;
-#endif /* PAGE_SIZE < 8192) */
+#endif /* PAGE_SIZE >= 8192) */
 
 	/* If we have drained the page fragment pool we need to update
 	 * the pagecnt_bias and page count so that we fully restock the
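To see why the 2048 constant was wrong here, a standalone sketch of the cutoff arithmetic; the 8 KiB page and ~320-byte shared-info overhead are illustrative assumptions (the real overhead is SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) and varies by architecture):

#include <stdio.h>

#define EX_PAGE_SIZE 8192u
#define EX_SHINFO     320u  /* stand-in for the skb_shared_info overhead */

int main(void)
{
	/* SKB_WITH_OVERHEAD(PAGE_SIZE): bytes usable for Rx buffers */
	unsigned int usable = EX_PAGE_SIZE - EX_SHINFO;      /* 7872 */

	/* The old cutoff assumed 2048-byte buffers, but on
	 * PAGE_SIZE >= 8192 the driver uses 3072-byte buffers
	 * (ICE_RXBUF_3072), so a buffer placed at the old cutoff could
	 * run 1024 bytes past the usable area into the shared-info tail.
	 */
	printf("old ICE_LAST_OFFSET: %u\n", usable - 2048u); /* 5824 */
	printf("new ICE_LAST_OFFSET: %u\n", usable - 3072u); /* 4800 */
	printf("overrun at old cutoff: %u bytes\n", 3072u - 2048u);
	return 0;
}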
@@ -949,12 +924,7 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
 	struct ice_rx_buf *rx_buf;
 
 	rx_buf = &rx_ring->rx_buf[ntc];
-	rx_buf->pgcnt =
-#if (PAGE_SIZE < 8192)
-		page_count(rx_buf->page);
-#else
-		0;
-#endif
+	rx_buf->pgcnt = page_count(rx_buf->page);
 	prefetchw(rx_buf->page);
 
 	if (!size)
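With pgcnt now populated unconditionally, the "only owner" reuse test in ice_can_reuse_rx_page() is no longer compiled out on big-page kernels. A simplified restatement of the combined reuse logic after this series (argument names are illustrative, not driver API):

#include <stdbool.h>

/* Sketch of the page-reuse decision: the refcount test runs on every
 * page size (pgcnt is always read via page_count() now), and big pages
 * additionally stop reusing once the next buffer would no longer fit
 * before the shared-info tail.
 */
static bool can_reuse(unsigned int pgcnt, unsigned int pagecnt_bias,
		      unsigned int page_offset, unsigned int last_offset,
		      bool big_pages)
{
	/* another owner still holds a reference: not ours to recycle */
	if (pgcnt - pagecnt_bias > 1)
		return false;

	/* on PAGE_SIZE >= 8192, also bound the walk across the page */
	if (big_pages && page_offset > last_offset)
		return false;

	return true;
}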
@@ -1160,11 +1130,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 	bool failure;
 	u32 first;
 
-	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
-#if (PAGE_SIZE < 8192)
-	xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
-#endif
-
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 	if (xdp_prog) {
 		xdp_ring = rx_ring->xdp_ring;
@@ -1223,10 +1188,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
 				     offset;
 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
-#if (PAGE_SIZE > 4096)
-			/* At larger PAGE_SIZE, frame_sz depend on len size */
-			xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
-#endif
 			xdp_buff_clear_frags_flag(xdp);
 		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
 			break;
...