Commit 3a1f6f45 authored by Jakub Kicinski

Merge branch 'net-xilinx-axienet-partial-checksum-offload-improvements'

Sean Anderson says:

====================
net: xilinx: axienet: Partial checksum offload improvements

Partial checksum offload is not always used when it could be.
Enable it in more cases.
====================

Link: https://patch.msgid.link/20240909161016.1149119-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents d59239f8 736f0c7a
@@ -529,8 +529,6 @@ struct skbuf_dma_descriptor {
  *	  supported, the maximum frame size would be 9k. Else it is
  *	  1522 bytes (assuming support for basic VLAN)
  * @rxmem:	Stores rx memory size for jumbo frame handling.
- * @csum_offload_on_tx_path:	Stores the checksum selection on TX side.
- * @csum_offload_on_rx_path:	Stores the checksum selection on RX side.
  * @coalesce_count_rx:	Store the irq coalesce on RX side.
  * @coalesce_usec_rx:	IRQ coalesce delay for RX
  * @coalesce_count_tx:	Store the irq coalesce on TX side.
@@ -609,9 +607,6 @@ struct axienet_local {
 	u32 max_frm_size;
 	u32 rxmem;
-	int csum_offload_on_tx_path;
-	int csum_offload_on_rx_path;
 	u32 coalesce_count_rx;
 	u32 coalesce_usec_rx;
 	u32 coalesce_count_tx;
@@ -1188,9 +1188,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
 			    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 			}
-		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
-			   skb->protocol == htons(ETH_P_IP) &&
-			   skb->len > 64) {
+		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
 			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 			skb->ip_summed = CHECKSUM_COMPLETE;
 		}
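For context, CHECKSUM_COMPLETE means the driver hands the stack the raw 16-bit ones'-complement sum that the hardware computed over the received data (taken here from cur_p->app3), and the stack then verifies whatever protocol the frame carries. Below is a minimal user-space sketch of that sum, assuming RFC 1071 folding; csum_fold_range() and the sample payload are made up for the example and are not kernel helpers.

/* Illustrative sketch only (not part of the patch): the kind of
 * ones'-complement sum the stack folds when ip_summed is
 * CHECKSUM_COMPLETE. csum_fold_range() is a made-up name.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t csum_fold_range(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)	/* sum 16-bit words */
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)				/* odd trailing byte */
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)			/* fold carries (RFC 1071) */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c, 0xde, 0xad, 0xbe, 0xef };

	/* A driver would stash this value in skb->csum and set
	 * skb->ip_summed = CHECKSUM_COMPLETE; the stack does the rest.
	 */
	printf("ones'-complement sum: 0x%04x\n",
	       csum_fold_range(payload, sizeof(payload)));
	return 0;
}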
@@ -2639,38 +2637,28 @@ static int axienet_probe(struct platform_device *pdev)
 	if (!ret) {
 		switch (value) {
 		case 1:
-			lp->csum_offload_on_tx_path =
-				XAE_FEATURE_PARTIAL_TX_CSUM;
 			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
-			/* Can checksum TCP/UDP over IPv4. */
-			ndev->features |= NETIF_F_IP_CSUM;
+			/* Can checksum any contiguous range */
+			ndev->features |= NETIF_F_HW_CSUM;
 			break;
 		case 2:
-			lp->csum_offload_on_tx_path =
-				XAE_FEATURE_FULL_TX_CSUM;
 			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
 			/* Can checksum TCP/UDP over IPv4. */
 			ndev->features |= NETIF_F_IP_CSUM;
 			break;
-		default:
-			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
 		}
 	}
 	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
 	if (!ret) {
 		switch (value) {
 		case 1:
-			lp->csum_offload_on_rx_path =
-				XAE_FEATURE_PARTIAL_RX_CSUM;
 			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
 			ndev->features |= NETIF_F_RXCSUM;
 			break;
 		case 2:
-			lp->csum_offload_on_rx_path =
-				XAE_FEATURE_FULL_RX_CSUM;
 			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
 			ndev->features |= NETIF_F_RXCSUM;
 			break;
-		default:
-			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
 		}
 	}
 	/* For supporting jumbo frames, the Axi Ethernet hardware must have
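On the TX side, the switch from NETIF_F_IP_CSUM to NETIF_F_HW_CSUM for partial offload reflects how the hardware behaves: it is given a start offset and an insert position, sums everything from the start to the end of the frame, and writes the complement at the insert position, regardless of which protocol is inside. Below is a rough user-space sketch of that behaviour under those assumptions; csum_range(), hw_csum_insert() and the offsets are made up for the example.

/* Illustrative sketch only (not part of the patch): what "checksum any
 * contiguous range" means for partial TX offload. Names and offsets
 * below are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>

static uint16_t csum_range(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Sum from csum_start to the end of the frame and store the complement
 * at csum_start + csum_offset, roughly what partial TX offload does with
 * the offsets the driver programs into the descriptor.
 */
static void hw_csum_insert(uint8_t *frame, size_t len,
			   size_t csum_start, size_t csum_offset)
{
	uint16_t sum = (uint16_t)~csum_range(frame + csum_start, len - csum_start);

	frame[csum_start + csum_offset] = sum >> 8;
	frame[csum_start + csum_offset + 1] = sum & 0xff;
}

int main(void)
{
	uint8_t frame[64];

	memset(frame, 0xab, sizeof(frame));
	frame[40] = frame[41] = 0;	/* zero the checksum field first */
	hw_csum_insert(frame, sizeof(frame), 34, 6);
	printf("inserted csum: 0x%02x%02x\n", frame[40], frame[41]);
	return 0;
}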