Commit 2e80aeae authored by Praveen Kaligineedi, committed by David S. Miller

gve: XDP support GQI-QPL: helper function changes

This patch adds/modifies the helper functions needed to add XDP
support: it introduces a gve_num_tx_queues() helper and converts every
read of the TX queue count to use it, passes the skb-derived fields
into gve_tx_fill_pkt_desc() and gve_tx_fill_seg_desc() explicitly so
the descriptor-fill helpers no longer require a struct sk_buff, and
drops the pad argument from gve_rx_copy() in favor of a per-buffer
page_info->pad field.
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Jeroen de Borst <jeroendb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ec4040ae
@@ -855,6 +855,11 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
 		priv->queue_format == GVE_GQI_QPL_FORMAT;
 }
 
+static inline u32 gve_num_tx_queues(struct gve_priv *priv)
+{
+	return priv->tx_cfg.num_queues;
+}
+
 /* buffers */
 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
 		   struct page **page, dma_addr_t *dma,
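The new helper is a one-line wrapper today, but it gives the driver a
single place to account for extra TX queues once XDP queues exist. A
minimal sketch of how a follow-up patch could extend it, assuming a
hypothetical priv->num_xdp_queues field that this patch does not add:

	/* Hypothetical follow-up: include XDP TX queues in the count.
	 * num_xdp_queues is an assumed field, not part of this patch.
	 */
	static inline u32 gve_num_tx_queues(struct gve_priv *priv)
	{
		return priv->tx_cfg.num_queues + priv->num_xdp_queues;
	}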
...
@@ -81,8 +81,10 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
 	char *s = (char *)data;
+	int num_tx_queues;
 	int i, j;
 
+	num_tx_queues = gve_num_tx_queues(priv);
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(s, *gve_gstrings_main_stats,
@@ -97,7 +99,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 		}
 	}
 
-	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+	for (i = 0; i < num_tx_queues; i++) {
 		for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
 			snprintf(s, ETH_GSTRING_LEN,
 				 gve_gstrings_tx_stats[j], i);
@@ -124,12 +126,14 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 static int gve_get_sset_count(struct net_device *netdev, int sset)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
+	int num_tx_queues;
 
+	num_tx_queues = gve_num_tx_queues(priv);
 	switch (sset) {
 	case ETH_SS_STATS:
 		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
 		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
-		       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
+		       (num_tx_queues * NUM_GVE_TX_CNTS);
 	case ETH_SS_PRIV_FLAGS:
 		return GVE_PRIV_FLAGS_STR_LEN;
 	default:
@@ -153,18 +157,20 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	struct gve_priv *priv;
 	bool skip_nic_stats;
 	unsigned int start;
+	int num_tx_queues;
 	int ring;
 	int i, j;
 
 	ASSERT_RTNL();
 
 	priv = netdev_priv(netdev);
+	num_tx_queues = gve_num_tx_queues(priv);
 	report_stats = priv->stats_report->stats;
 	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
 					    sizeof(int), GFP_KERNEL);
 	if (!rx_qid_to_stats_idx)
 		return;
-	tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
+	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
 					    sizeof(int), GFP_KERNEL);
 	if (!tx_qid_to_stats_idx) {
 		kfree(rx_qid_to_stats_idx);
@@ -195,7 +201,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 		}
 	}
 	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
-	     ring < priv->tx_cfg.num_queues; ring++) {
+	     ring < num_tx_queues; ring++) {
 		if (priv->tx) {
 			do {
 				start =
@@ -232,7 +238,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	i = GVE_MAIN_STATS_LEN;
 
 	/* For rx cross-reporting stats, start from nic rx stats in report */
-	base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
 		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
 	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
 		base_stats_idx;
@@ -298,7 +304,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 
 	/* For tx cross-reporting stats, start from nic tx stats in report */
 	base_stats_idx = max_stats_idx;
-	max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+	max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
 		max_stats_idx;
 	/* Preprocess the stats report for tx, map queue id to start index */
 	skip_nic_stats = false;
@@ -316,7 +322,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	}
 	/* walk TX rings */
 	if (priv->tx) {
-		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+		for (ring = 0; ring < num_tx_queues; ring++) {
 			struct gve_tx_ring *tx = &priv->tx[ring];
 
 			if (gve_is_gqi(priv)) {
@@ -355,7 +361,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			}
 		}
 	} else {
-		i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
+		i += num_tx_queues * NUM_GVE_TX_CNTS;
 	}
 
 	kfree(rx_qid_to_stats_idx);
@@ -502,7 +508,9 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
 	u64 ori_flags, new_flags;
+	int num_tx_queues;
 
+	num_tx_queues = gve_num_tx_queues(priv);
 	ori_flags = READ_ONCE(priv->ethtool_flags);
 	new_flags = ori_flags;
 
@@ -522,7 +530,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
 
 	/* delete report stats timer. */
 	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
 		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
-				   priv->tx_cfg.num_queues;
+				   num_tx_queues;
 		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
 				   priv->rx_cfg.num_queues;
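All of the ethtool paths above now sample the TX queue count once per
call, either into a local num_tx_queues or directly through the
helper, so string generation, stat counting, and the stat walk agree
on a single value. A minimal sketch of the pattern, with a
hypothetical function name for illustration:

	/* Illustrative only: size and iterate with one sampled count. */
	static int example_tx_stat_count(struct gve_priv *priv)
	{
		int num_tx_queues = gve_num_tx_queues(priv); /* read once */

		return num_tx_queues * NUM_GVE_TX_CNTS;
	}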
...
@@ -90,8 +90,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 	struct gve_priv *priv = netdev_priv(dev);
 	unsigned int start;
 	u64 packets, bytes;
+	int num_tx_queues;
 	int ring;
 
+	num_tx_queues = gve_num_tx_queues(priv);
 	if (priv->rx) {
 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
 			do {
@@ -106,7 +108,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 		}
 	}
 	if (priv->tx) {
-		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+		for (ring = 0; ring < num_tx_queues; ring++) {
 			do {
 				start =
 					u64_stats_fetch_begin(&priv->tx[ring].statss);
@@ -180,7 +182,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
 	int tx_stats_num, rx_stats_num;
 
 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
-		       priv->tx_cfg.num_queues;
+		       gve_num_tx_queues(priv);
 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
 		       priv->rx_cfg.num_queues;
 	priv->stats_report_len = struct_size(priv->stats_report, stats,
@@ -622,20 +624,21 @@ static int gve_unregister_qpls(struct gve_priv *priv)
 
 static int gve_create_rings(struct gve_priv *priv)
 {
+	int num_tx_queues = gve_num_tx_queues(priv);
 	int err;
 	int i;
 
-	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
+	err = gve_adminq_create_tx_queues(priv, num_tx_queues);
 	if (err) {
 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
-			  priv->tx_cfg.num_queues);
+			  num_tx_queues);
 		/* This failure will trigger a reset - no need to clean
 		 * up
 		 */
 		return err;
 	}
 	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
-		  priv->tx_cfg.num_queues);
+		  num_tx_queues);
 
 	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
 	if (err) {
@@ -675,7 +678,7 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
 	int i;
 
 	/* Add tx napi & init sync stats*/
-	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+	for (i = 0; i < gve_num_tx_queues(priv); i++) {
 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
 
 		u64_stats_init(&priv->tx[i].statss);
@@ -753,9 +756,10 @@ static int gve_alloc_rings(struct gve_priv *priv)
 
 static int gve_destroy_rings(struct gve_priv *priv)
 {
+	int num_tx_queues = gve_num_tx_queues(priv);
 	int err;
 
-	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
+	err = gve_adminq_destroy_tx_queues(priv, num_tx_queues);
 	if (err) {
 		netif_err(priv, drv, priv->dev,
 			  "failed to destroy tx queues\n");
@@ -784,11 +788,12 @@ static void gve_rx_free_rings(struct gve_priv *priv)
 
 static void gve_free_rings(struct gve_priv *priv)
 {
+	int num_tx_queues = gve_num_tx_queues(priv);
 	int ntfy_idx;
 	int i;
 
 	if (priv->tx) {
-		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+		for (i = 0; i < num_tx_queues; i++) {
 			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
 			gve_remove_napi(priv, ntfy_idx);
 		}
@@ -1118,7 +1123,7 @@ static void gve_turndown(struct gve_priv *priv)
 		return;
 
 	/* Disable napi to prevent more work from coming in */
-	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
@@ -1146,7 +1151,7 @@ static void gve_turnup(struct gve_priv *priv)
 	netif_tx_start_all_queues(priv->dev);
 
 	/* Enable napi and unmask interrupts for all queues */
-	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
@@ -1306,7 +1311,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
 	be64_add_cpu(&priv->stats_report->written_count, 1);
 
 	/* tx stats */
 	if (priv->tx) {
-		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+		for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
 			u32 last_completion = 0;
 			u32 tx_frames = 0;
...
@@ -556,7 +556,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
 
 	if (len <= priv->rx_copybreak && is_only_frag) {
 		/* Just copy small packets */
-		skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
+		skb = gve_rx_copy(netdev, napi, page_info, len);
 		if (skb) {
 			u64_stats_update_begin(&rx->statss);
 			rx->rx_copied_pkt++;
...
@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 
 	if (eop && buf_len <= priv->rx_copybreak) {
 		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
-					       &buf_state->page_info, buf_len, 0);
+					       &buf_state->page_info, buf_len);
 		if (unlikely(!rx->ctx.skb_head))
 			goto error;
 		rx->ctx.skb_tail = rx->ctx.skb_head;
...
@@ -374,18 +374,18 @@ static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
 }
 
 static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
-				 struct sk_buff *skb, bool is_gso,
+				 u16 csum_offset, u8 ip_summed, bool is_gso,
 				 int l4_hdr_offset, u32 desc_cnt,
-				 u16 hlen, u64 addr)
+				 u16 hlen, u64 addr, u16 pkt_len)
 {
 	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
 	if (is_gso) {
 		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
-		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
-	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
 		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
-		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
 	} else {
 		pkt_desc->pkt.type_flags = GVE_TXD_STD;
@@ -393,7 +393,7 @@ static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
 		pkt_desc->pkt.l4_hdr_offset = 0;
 	}
 	pkt_desc->pkt.desc_cnt = desc_cnt;
-	pkt_desc->pkt.len = cpu_to_be16(skb->len);
+	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
 	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
 	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
 }
@@ -412,15 +412,16 @@ static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
 }
 
 static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
-				 struct sk_buff *skb, bool is_gso,
+				 u16 l3_offset, u16 gso_size,
+				 bool is_gso_v6, bool is_gso,
 				 u16 len, u64 addr)
 {
 	seg_desc->seg.type_flags = GVE_TXD_SEG;
 	if (is_gso) {
-		if (skb_is_gso_v6(skb))
+		if (is_gso_v6)
 			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
-		seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
-		seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+		seg_desc->seg.l3_offset = l3_offset >> 1;
+		seg_desc->seg.mss = cpu_to_be16(gso_size);
 	}
 	seg_desc->seg.seg_len = cpu_to_be16(len);
 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
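With the skb dereferences hoisted into the callers, both
descriptor-fill helpers can now be driven by something other than an
skb, such as an XDP frame. A hypothetical XDP transmit call site,
assuming XDP frames need no GSO or checksum offload (the helper
signatures are from this patch; the caller shown is illustrative):

	/* Illustrative only: fill one descriptor for an XDP frame of
	 * pkt_len bytes at DMA address addr; no GSO, no csum offload.
	 */
	gve_tx_fill_pkt_desc(pkt_desc, 0 /* csum_offset */, CHECKSUM_NONE,
			     false /* is_gso */, 0 /* l4_hdr_offset */,
			     1 /* desc_cnt */, pkt_len /* hlen */, addr,
			     pkt_len);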
@@ -473,9 +474,10 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
 	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
 					   &info->iov[payload_iov]);
 
-	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
+			     is_gso, l4_hdr_offset,
 			     1 + mtd_desc_nr + payload_nfrags, hlen,
-			     info->iov[hdr_nfrags - 1].iov_offset);
+			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);
 
 	skb_copy_bits(skb, 0,
 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
@@ -494,7 +496,9 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
 		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
 		seg_desc = &tx->desc[next_idx];
 
-		gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
+		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+				     skb_shinfo(skb)->gso_size,
+				     skb_is_gso_v6(skb), is_gso,
 				     info->iov[i].iov_len,
 				     info->iov[i].iov_offset);
 
@@ -552,8 +556,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
 	if (mtd_desc_nr)
 		num_descriptors++;
 
-	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
-			     num_descriptors, hlen, addr);
+	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
+			     is_gso, l4_hdr_offset,
+			     num_descriptors, hlen, addr, skb->len);
 
 	if (mtd_desc_nr) {
 		idx = (idx + 1) & tx->mask;
@@ -569,7 +574,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
 		addr += hlen;
 		idx = (idx + 1) & tx->mask;
 		seg_desc = &tx->desc[idx];
-		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
+		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+				     skb_shinfo(skb)->gso_size,
+				     skb_is_gso_v6(skb), is_gso, len, addr);
 	}
 
 	for (i = 0; i < shinfo->nr_frags; i++) {
@@ -587,7 +594,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
 
 		dma_unmap_len_set(&tx->info[idx], len, len);
 		dma_unmap_addr_set(&tx->info[idx], dma, addr);
-		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
+		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+				     skb_shinfo(skb)->gso_size,
+				     skb_is_gso_v6(skb), is_gso, len, addr);
 	}
 
 	return num_descriptors;
...
@@ -49,10 +49,10 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
 }
 
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
-			    struct gve_rx_slot_page_info *page_info, u16 len,
-			    u16 padding)
+			    struct gve_rx_slot_page_info *page_info, u16 len)
 {
-	void *va = page_info->page_address + padding + page_info->page_offset;
+	void *va = page_info->page_address + page_info->page_offset +
+		   page_info->pad;
 	struct sk_buff *skb;
 
 	skb = napi_alloc_skb(napi, len);
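The padding now travels with the buffer in page_info->pad instead of
being passed by each caller, so every consumer of the buffer computes
the same start-of-packet address. A sketch of a GQI RX call site under
the new scheme (page_info->pad and the gve_rx_copy() signature are
from this patch; the surrounding lines are illustrative):

	/* Illustrative only: record the pad on the buffer, then copy. */
	page_info->pad = GVE_RX_PAD;
	skb = gve_rx_copy(netdev, napi, page_info, len);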
...
@@ -18,8 +18,7 @@ void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
 void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
 
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
-			    struct gve_rx_slot_page_info *page_info, u16 len,
-			    u16 pad);
+			    struct gve_rx_slot_page_info *page_info, u16 len);
 
 /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
 void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
...