Commit dc021e6c authored by David S. Miller

Merge branch 'gve-xdp-support'

Praveen Kaligineedi says:

====================
gve: Add XDP support for GQI-QPL format

Adding support for XDP DROP, PASS, TX, REDIRECT for GQI QPL format.
Add AF_XDP zero-copy support.

When an XDP program is installed, dedicated TX queues are created to
handle XDP traffic. The user needs to ensure that the number of
configured TX queues is equal to the number of configured RX queues; and
the number of TX/RX queues is less than or equal to half the maximum
number of TX/RX queues.

The XDP traffic from AF_XDP sockets and from other NICs (arriving via
XDP_REDIRECT) will also egress through the dedicated XDP TX queues.

Although these changes support AF_XDP socket in zero-copy mode, there is
still a copy happening within the driver between XSK buffer pool and QPL
bounce buffers in GQI-QPL format.

The following example demonstrates how the XDP packets are mapped to
TX queues:

Example configuration:
Max RX queues : 2N, Max TX queues : 2N
Configured RX queues : N, Configured TX queues : N

TX queue mapping:
TX queues with queue id 0,...,N-1 will handle traffic from the stack.
TX queues with queue id N,...,2N-1 will handle XDP traffic.

For the XDP packets transmitted using XDP_TX action:
<Egress TX queue id> = N + <Ingress RX queue id>

For the XDP packets that arrive from other NICs via XDP_REDIRECT action:
<Egress TX queue id> = N + ( smp_processor_id() % N )

For AF_XDP zero-copy mode:
<Egress TX queue id> = N + <AF_XDP TX queue id>

Changes in v2:
- Removed gve_close/gve_open when adding XDP dedicated queues. Instead
we add and register additional TX queues when the XDP program is
installed. If the allocation/registration fails we return error and do
not install the XDP program. Added a new patch to enable adding TX queues
without gve_close/gve_open
- Removed xdp tx spin lock from this patch. It is needed for XDP_REDIRECT
support as both XDP_REDIRECT and XDP_TX traffic share the dedicated XDP
queues. Moved the code to add xdp tx spinlock to the subsequent patch
that adds XDP_REDIRECT support.
- Added netdev_err when the user tries to set rx/tx queues to the values
not supported when XDP is enabled.
- Removed rcu annotation for xdp_prog. We disable the napi prior to
adding/removing the xdp_prog and reenable it after the program has
been installed for all the queues.
- Ring the tx doorbell once for napi instead of every XDP TX packet.
- Added a new helper function for freeing the FIFO buffer
- Unregister xdp rxq for all the queues when the registration
fails during XDP program installation
- Register xsk rxq only when XSK buff pool is enabled
- Removed code accessing internal xsk_buff_pool fields
- Removed sleep driven code when disabling XSK buff pool. Disable
napi and re-enable it after disabling XSK pool.
- Make sure that we clean up dma mappings on XSK pool disable
- Use napi_if_scheduled_mark_missed to avoid unnecessary napi move
to the CPU calling ndo_xsk_wakeup()

Changes in v3:
- Padding bytes are used if the XDP TX packet headers do not
fit at tail of TX FIFO. Taking these padding bytes into account
while checking if enough space is available in TX FIFO.

Changes in v4:
- Turn on the carrier based on the link status synchronously rather
than asynchronously when XDP is installed/uninstalled
- Set the supported flags in net_device.xdp_features
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ec4040ae fd8e4032
...@@ -47,6 +47,10 @@ ...@@ -47,6 +47,10 @@
#define GVE_RX_BUFFER_SIZE_DQO 2048 #define GVE_RX_BUFFER_SIZE_DQO 2048
#define GVE_XDP_ACTIONS 5
#define GVE_TX_MAX_HEADER_SIZE 182
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue { struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */ struct gve_rx_desc *desc_ring; /* the descriptor ring */
...@@ -230,7 +234,10 @@ struct gve_rx_ring { ...@@ -230,7 +234,10 @@ struct gve_rx_ring {
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */ u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
u64 xdp_tx_errors;
u64 xdp_redirect_errors;
u64 xdp_alloc_fails;
u64 xdp_actions[GVE_XDP_ACTIONS];
u32 q_num; /* queue index */ u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */ u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */ struct gve_queue_resources *q_resources; /* head and tail pointer idx */
...@@ -238,6 +245,12 @@ struct gve_rx_ring { ...@@ -238,6 +245,12 @@ struct gve_rx_ring {
struct u64_stats_sync statss; /* sync stats for 32bit archs */ struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */ struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
}; };
/* A TX desc ring entry */ /* A TX desc ring entry */
...@@ -258,7 +271,14 @@ struct gve_tx_iovec { ...@@ -258,7 +271,14 @@ struct gve_tx_iovec {
* ring entry but only used for a pkt_desc not a seg_desc * ring entry but only used for a pkt_desc not a seg_desc
*/ */
struct gve_tx_buffer_state { struct gve_tx_buffer_state {
struct sk_buff *skb; /* skb for this pkt */ union {
struct sk_buff *skb; /* skb for this pkt */
struct xdp_frame *xdp_frame; /* xdp_frame */
};
struct {
u16 size; /* size of xmitted xdp pkt */
u8 is_xsk; /* xsk buff */
} xdp;
union { union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
struct { struct {
...@@ -373,6 +393,8 @@ struct gve_tx_ring { ...@@ -373,6 +393,8 @@ struct gve_tx_ring {
struct { struct {
/* Spinlock for when cleanup in progress */ /* Spinlock for when cleanup in progress */
spinlock_t clean_lock; spinlock_t clean_lock;
/* Spinlock for XDP tx traffic */
spinlock_t xdp_lock;
}; };
/* DQO fields. */ /* DQO fields. */
...@@ -450,6 +472,12 @@ struct gve_tx_ring { ...@@ -450,6 +472,12 @@ struct gve_tx_ring {
dma_addr_t q_resources_bus; /* dma address of the queue resources */ dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */ struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
u32 xdp_xsk_wakeup;
u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
} ____cacheline_aligned; } ____cacheline_aligned;
/* Wraps the info for one irq including the napi struct and the queues /* Wraps the info for one irq including the napi struct and the queues
...@@ -526,9 +554,11 @@ struct gve_priv { ...@@ -526,9 +554,11 @@ struct gve_priv {
u16 rx_data_slot_cnt; /* rx buffer length */ u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages; u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */ u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
u32 rx_copybreak; /* copy packets smaller than this */ u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */ u16 default_num_queues; /* default num queues to set up */
u16 num_xdp_queues;
struct gve_queue_config tx_cfg; struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg; struct gve_queue_config rx_cfg;
struct gve_qpl_config qpl_cfg; /* map used QPL ids */ struct gve_qpl_config qpl_cfg; /* map used QPL ids */
...@@ -785,7 +815,17 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv) ...@@ -785,7 +815,17 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
if (priv->queue_format != GVE_GQI_QPL_FORMAT) if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0; return 0;
return priv->tx_cfg.num_queues; return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}
/* Returns the number of XDP tx queue page lists
*/
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0;
return priv->num_xdp_queues;
} }
/* Returns the number of rx queue page lists /* Returns the number of rx queue page lists
...@@ -798,16 +838,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv) ...@@ -798,16 +838,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
return priv->rx_cfg.num_queues; return priv->rx_cfg.num_queues;
} }
static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
return tx_qid;
}
static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
return priv->tx_cfg.max_queues + rx_qid;
}
static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
return gve_tx_qpl_id(priv, 0);
}
static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
{
return gve_rx_qpl_id(priv, 0);
}
/* Returns a pointer to the next available tx qpl in the list of qpls /* Returns a pointer to the next available tx qpl in the list of qpls
*/ */
static inline static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv) struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
{ {
int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map, int id = gve_tx_qpl_id(priv, tx_qid);
priv->qpl_cfg.qpl_map_size);
/* we are out of tx qpls */ /* QPL already in use */
if (id >= gve_num_tx_qpls(priv)) if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL; return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map); set_bit(id, priv->qpl_cfg.qpl_id_map);
...@@ -817,14 +876,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv) ...@@ -817,14 +876,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
/* Returns a pointer to the next available rx qpl in the list of qpls /* Returns a pointer to the next available rx qpl in the list of qpls
*/ */
static inline static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv) struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
{ {
int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map, int id = gve_rx_qpl_id(priv, rx_qid);
priv->qpl_cfg.qpl_map_size,
gve_num_tx_qpls(priv));
/* we are out of rx qpls */ /* QPL already in use */
if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv)) if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL; return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map); set_bit(id, priv->qpl_cfg.qpl_id_map);
...@@ -843,7 +900,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id) ...@@ -843,7 +900,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id) int id)
{ {
if (id < gve_num_tx_qpls(priv)) if (id < gve_rx_start_qpl_id(priv))
return DMA_TO_DEVICE; return DMA_TO_DEVICE;
else else
return DMA_FROM_DEVICE; return DMA_FROM_DEVICE;
...@@ -855,6 +912,21 @@ static inline bool gve_is_gqi(struct gve_priv *priv) ...@@ -855,6 +912,21 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT; priv->queue_format == GVE_GQI_QPL_FORMAT;
} }
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}
static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
return priv->tx_cfg.num_queues + queue_id;
}
static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
return gve_xdp_tx_queue_id(priv, 0);
}
/* buffers */ /* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev, int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma, struct page **page, dma_addr_t *dma,
...@@ -863,9 +935,15 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, ...@@ -863,9 +935,15 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction); enum dma_data_direction);
/* tx handling */ /* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget); bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv); bool gve_xdp_poll(struct gve_notify_block *block, int budget);
void gve_tx_free_rings_gqi(struct gve_priv *priv); int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
u32 gve_tx_load_event_counter(struct gve_priv *priv, u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx); struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
......
...@@ -516,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) ...@@ -516,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
return gve_adminq_issue_cmd(priv, &cmd); return gve_adminq_issue_cmd(priv, &cmd);
} }
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues) int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{ {
int err; int err;
int i; int i;
for (i = 0; i < num_queues; i++) { for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i); err = gve_adminq_create_tx_queue(priv, i);
if (err) if (err)
return err; return err;
...@@ -604,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index) ...@@ -604,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
return 0; return 0;
} }
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues) int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{ {
int err; int err;
int i; int i;
for (i = 0; i < num_queues; i++) { for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i); err = gve_adminq_destroy_tx_queue(priv, i);
if (err) if (err)
return err; return err;
......
...@@ -410,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv, ...@@ -410,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t db_array_bus_addr, dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks); u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv); int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues); int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id); int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues); int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id); int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv, int gve_adminq_register_page_list(struct gve_priv *priv,
......
...@@ -34,6 +34,11 @@ static u32 gve_get_msglevel(struct net_device *netdev) ...@@ -34,6 +34,11 @@ static u32 gve_get_msglevel(struct net_device *netdev)
return priv->msg_enable; return priv->msg_enable;
} }
/* For the following stats column string names, make sure the order
* matches how it is filled in the code. For xdp_aborted, xdp_drop,
* xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
* as declared in enum xdp_action inside file uapi/linux/bpf.h .
*/
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
"rx_dropped", "tx_dropped", "tx_timeouts", "rx_dropped", "tx_dropped", "tx_timeouts",
...@@ -49,12 +54,16 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { ...@@ -49,12 +54,16 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]", "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]", "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
}; };
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]", "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]", "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
"tx_dma_mapping_error[%u]", "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
}; };
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
...@@ -81,8 +90,10 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ...@@ -81,8 +90,10 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{ {
struct gve_priv *priv = netdev_priv(netdev); struct gve_priv *priv = netdev_priv(netdev);
char *s = (char *)data; char *s = (char *)data;
int num_tx_queues;
int i, j; int i, j;
num_tx_queues = gve_num_tx_queues(priv);
switch (stringset) { switch (stringset) {
case ETH_SS_STATS: case ETH_SS_STATS:
memcpy(s, *gve_gstrings_main_stats, memcpy(s, *gve_gstrings_main_stats,
...@@ -97,7 +108,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ...@@ -97,7 +108,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
} }
} }
for (i = 0; i < priv->tx_cfg.num_queues; i++) { for (i = 0; i < num_tx_queues; i++) {
for (j = 0; j < NUM_GVE_TX_CNTS; j++) { for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
snprintf(s, ETH_GSTRING_LEN, snprintf(s, ETH_GSTRING_LEN,
gve_gstrings_tx_stats[j], i); gve_gstrings_tx_stats[j], i);
...@@ -124,12 +135,14 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ...@@ -124,12 +135,14 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
static int gve_get_sset_count(struct net_device *netdev, int sset) static int gve_get_sset_count(struct net_device *netdev, int sset)
{ {
struct gve_priv *priv = netdev_priv(netdev); struct gve_priv *priv = netdev_priv(netdev);
int num_tx_queues;
num_tx_queues = gve_num_tx_queues(priv);
switch (sset) { switch (sset) {
case ETH_SS_STATS: case ETH_SS_STATS:
return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN + return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
(priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) + (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
(priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS); (num_tx_queues * NUM_GVE_TX_CNTS);
case ETH_SS_PRIV_FLAGS: case ETH_SS_PRIV_FLAGS:
return GVE_PRIV_FLAGS_STR_LEN; return GVE_PRIV_FLAGS_STR_LEN;
default: default:
...@@ -153,18 +166,20 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -153,18 +166,20 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_priv *priv; struct gve_priv *priv;
bool skip_nic_stats; bool skip_nic_stats;
unsigned int start; unsigned int start;
int num_tx_queues;
int ring; int ring;
int i, j; int i, j;
ASSERT_RTNL(); ASSERT_RTNL();
priv = netdev_priv(netdev); priv = netdev_priv(netdev);
num_tx_queues = gve_num_tx_queues(priv);
report_stats = priv->stats_report->stats; report_stats = priv->stats_report->stats;
rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues, rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
sizeof(int), GFP_KERNEL); sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx) if (!rx_qid_to_stats_idx)
return; return;
tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues, tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL); sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) { if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx); kfree(rx_qid_to_stats_idx);
...@@ -195,7 +210,7 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -195,7 +210,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
} }
} }
for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
ring < priv->tx_cfg.num_queues; ring++) { ring < num_tx_queues; ring++) {
if (priv->tx) { if (priv->tx) {
do { do {
start = start =
...@@ -232,7 +247,7 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -232,7 +247,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
i = GVE_MAIN_STATS_LEN; i = GVE_MAIN_STATS_LEN;
/* For rx cross-reporting stats, start from nic rx stats in report */ /* For rx cross-reporting stats, start from nic rx stats in report */
base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues + base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues + max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
base_stats_idx; base_stats_idx;
...@@ -283,14 +298,26 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -283,14 +298,26 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (skip_nic_stats) { if (skip_nic_stats) {
/* skip NIC rx stats */ /* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM; i += NIC_RX_STATS_REPORT_NUM;
continue; } else {
} stats_idx = rx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) { for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
u64 value = u64 value =
be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value); be64_to_cpu(report_stats[stats_idx + j].value);
data[i++] = value; data[i++] = value;
}
} }
/* XDP rx counters */
do {
start = u64_stats_fetch_begin(&priv->rx[ring].statss);
for (j = 0; j < GVE_XDP_ACTIONS; j++)
data[i + j] = rx->xdp_actions[j];
data[i + j++] = rx->xdp_tx_errors;
data[i + j++] = rx->xdp_redirect_errors;
data[i + j++] = rx->xdp_alloc_fails;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
} }
} else { } else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
...@@ -298,7 +325,7 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -298,7 +325,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */ /* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx; base_stats_idx = max_stats_idx;
max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues + max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
max_stats_idx; max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */ /* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false; skip_nic_stats = false;
...@@ -316,7 +343,7 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -316,7 +343,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
} }
/* walk TX rings */ /* walk TX rings */
if (priv->tx) { if (priv->tx) {
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { for (ring = 0; ring < num_tx_queues; ring++) {
struct gve_tx_ring *tx = &priv->tx[ring]; struct gve_tx_ring *tx = &priv->tx[ring];
if (gve_is_gqi(priv)) { if (gve_is_gqi(priv)) {
...@@ -346,16 +373,28 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -346,16 +373,28 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (skip_nic_stats) { if (skip_nic_stats) {
/* skip NIC tx stats */ /* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM; i += NIC_TX_STATS_REPORT_NUM;
continue; } else {
} stats_idx = tx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) { for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
u64 value = u64 value =
be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value); be64_to_cpu(report_stats[stats_idx + j].value);
data[i++] = value; data[i++] = value;
}
} }
/* XDP xsk counters */
data[i++] = tx->xdp_xsk_wakeup;
data[i++] = tx->xdp_xsk_done;
do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
data[i + 1] = tx->xdp_xmit;
data[i + 2] = tx->xdp_xmit_errors;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
i += 3; /* XDP tx counters */
} }
} else { } else {
i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS; i += num_tx_queues * NUM_GVE_TX_CNTS;
} }
kfree(rx_qid_to_stats_idx); kfree(rx_qid_to_stats_idx);
...@@ -412,6 +451,12 @@ static int gve_set_channels(struct net_device *netdev, ...@@ -412,6 +451,12 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx) if (!new_rx || !new_tx)
return -EINVAL; return -EINVAL;
if (priv->num_xdp_queues &&
(new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
return -EINVAL;
}
if (!netif_carrier_ok(netdev)) { if (!netif_carrier_ok(netdev)) {
priv->tx_cfg.num_queues = new_tx; priv->tx_cfg.num_queues = new_tx;
priv->rx_cfg.num_queues = new_rx; priv->rx_cfg.num_queues = new_rx;
...@@ -502,7 +547,9 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags) ...@@ -502,7 +547,9 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{ {
struct gve_priv *priv = netdev_priv(netdev); struct gve_priv *priv = netdev_priv(netdev);
u64 ori_flags, new_flags; u64 ori_flags, new_flags;
int num_tx_queues;
num_tx_queues = gve_num_tx_queues(priv);
ori_flags = READ_ONCE(priv->ethtool_flags); ori_flags = READ_ONCE(priv->ethtool_flags);
new_flags = ori_flags; new_flags = ori_flags;
...@@ -522,7 +569,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags) ...@@ -522,7 +569,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
/* delete report stats timer. */ /* delete report stats timer. */
if (!(flags & BIT(0)) && (ori_flags & BIT(0))) { if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
int tx_stats_num = GVE_TX_STATS_REPORT_NUM * int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
priv->tx_cfg.num_queues; num_tx_queues;
int rx_stats_num = GVE_RX_STATS_REPORT_NUM * int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
priv->rx_cfg.num_queues; priv->rx_cfg.num_queues;
......
This diff is collapsed.
...@@ -8,6 +8,9 @@ ...@@ -8,6 +8,9 @@
#include "gve_adminq.h" #include "gve_adminq.h"
#include "gve_utils.h" #include "gve_utils.h"
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/filter.h>
#include <net/xdp.h>
#include <net/xdp_sock_drv.h>
static void gve_rx_free_buffer(struct device *dev, static void gve_rx_free_buffer(struct device *dev,
struct gve_rx_slot_page_info *page_info, struct gve_rx_slot_page_info *page_info,
...@@ -124,7 +127,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) ...@@ -124,7 +127,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return -ENOMEM; return -ENOMEM;
if (!rx->data.raw_addressing) { if (!rx->data.raw_addressing) {
rx->data.qpl = gve_assign_rx_qpl(priv); rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
if (!rx->data.qpl) { if (!rx->data.qpl) {
kvfree(rx->data.page_info); kvfree(rx->data.page_info);
rx->data.page_info = NULL; rx->data.page_info = NULL;
...@@ -556,7 +559,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, ...@@ -556,7 +559,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
if (len <= priv->rx_copybreak && is_only_frag) { if (len <= priv->rx_copybreak && is_only_frag) {
/* Just copy small packets */ /* Just copy small packets */
skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD); skb = gve_rx_copy(netdev, napi, page_info, len);
if (skb) { if (skb) {
u64_stats_update_begin(&rx->statss); u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++; rx->rx_copied_pkt++;
...@@ -591,6 +594,107 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, ...@@ -591,6 +594,107 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
return skb; return skb;
} }
/* Redirect one received frame into an AF_XDP zero-copy socket.
 *
 * Allocates a buffer from the ring's XSK pool, copies @len bytes of
 * @data into it (the copy out of the QPL bounce buffer is unavoidable
 * in GQI-QPL format) and hands the buffer to the XDP redirect core.
 * On any failure the XSK buffer is released here, so ownership only
 * transfers when xdp_do_redirect() succeeds.
 *
 * Return: 0 on success, -E2BIG if the pool frame is too small for the
 * packet, -ENOMEM if no XSK buffer could be allocated, or the error
 * returned by xdp_do_redirect().
 */
static int gve_xsk_pool_redirect(struct net_device *dev,
				 struct gve_rx_ring *rx,
				 void *data, int len,
				 struct bpf_prog *xdp_prog)
{
	struct xdp_buff *xsk_buff;
	int ret;

	/* The whole packet must fit in a single XSK pool frame. */
	if (len > rx->xsk_pool->frame_len)
		return -E2BIG;

	xsk_buff = xsk_buff_alloc(rx->xsk_pool);
	if (!xsk_buff) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}

	xsk_buff->data_end = xsk_buff->data + len;
	memcpy(xsk_buff->data, data, len);

	ret = xdp_do_redirect(dev, xsk_buff, xdp_prog);
	if (ret)
		xsk_buff_free(xsk_buff);

	return ret;
}
static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
struct xdp_buff *orig, struct bpf_prog *xdp_prog)
{
int total_len, len = orig->data_end - orig->data;
int headroom = XDP_PACKET_HEADROOM;
struct xdp_buff new;
void *frame;
int err;
if (rx->xsk_pool)
return gve_xsk_pool_redirect(dev, rx, orig->data,
len, xdp_prog);
total_len = headroom + SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
if (!frame) {
u64_stats_update_begin(&rx->statss);
rx->xdp_alloc_fails++;
u64_stats_update_end(&rx->statss);
return -ENOMEM;
}
xdp_init_buff(&new, total_len, &rx->xdp_rxq);
xdp_prepare_buff(&new, frame, headroom, len, false);
memcpy(new.data, orig->data, len);
err = xdp_do_redirect(dev, &new, xdp_prog);
if (err)
page_frag_free(frame);
return err;
}
/* Act on a non-XDP_PASS verdict from the XDP program for one rx frame.
 *
 * XDP_TX copies the frame out through the dedicated XDP tx queue that
 * is paired with this rx queue; XDP_REDIRECT hands it to the redirect
 * helper.  Aborted/drop/unknown verdicts require no work here beyond
 * the per-action counter updated at the end.
 */
static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct xdp_buff *xdp, struct bpf_prog *xprog,
			 int xdp_act)
{
	struct gve_tx_ring *tx;
	int tx_qid;
	int err;

	switch (xdp_act) {
	case XDP_ABORTED:
	case XDP_DROP:
	default:
		/* Frame is simply not forwarded; only counted below. */
		break;
	case XDP_TX:
		/* Egress queue id = stack tx queue count + rx queue id.
		 * xdp_lock serializes with XDP_REDIRECT traffic from other
		 * NICs that shares the same dedicated XDP tx queue.
		 */
		tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
		tx = &priv->tx[tx_qid];
		spin_lock(&tx->xdp_lock);
		err = gve_xdp_xmit_one(priv, tx, xdp->data,
				       xdp->data_end - xdp->data, NULL);
		spin_unlock(&tx->xdp_lock);
		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_tx_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	case XDP_REDIRECT:
		err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);
		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_redirect_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	}
	/* Per-action statistics; the bound check guards against verdicts
	 * outside the GVE_XDP_ACTIONS range from a buggy program.
	 */
	u64_stats_update_begin(&rx->statss);
	if ((u32)xdp_act < GVE_XDP_ACTIONS)
		rx->xdp_actions[xdp_act]++;
	u64_stats_update_end(&rx->statss);
}
#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
struct gve_rx_desc *desc, u32 idx, struct gve_rx_desc *desc, u32 idx,
...@@ -603,9 +707,12 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, ...@@ -603,9 +707,12 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
union gve_rx_data_slot *data_slot; union gve_rx_data_slot *data_slot;
struct gve_priv *priv = rx->gve; struct gve_priv *priv = rx->gve;
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
struct bpf_prog *xprog;
struct xdp_buff xdp;
dma_addr_t page_bus; dma_addr_t page_bus;
void *va; void *va;
u16 len = frag_size;
struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
bool is_first_frag = ctx->frag_cnt == 0; bool is_first_frag = ctx->frag_cnt == 0;
...@@ -645,9 +752,35 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, ...@@ -645,9 +752,35 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
PAGE_SIZE, DMA_FROM_DEVICE); PAGE_SIZE, DMA_FROM_DEVICE);
page_info->pad = is_first_frag ? GVE_RX_PAD : 0; page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
len -= page_info->pad;
frag_size -= page_info->pad; frag_size -= page_info->pad;
skb = gve_rx_skb(priv, rx, page_info, napi, frag_size, xprog = READ_ONCE(priv->xdp_prog);
if (xprog && is_only_frag) {
void *old_data;
int xdp_act;
xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
xdp_prepare_buff(&xdp, page_info->page_address +
page_info->page_offset, GVE_RX_PAD,
len, false);
old_data = xdp.data;
xdp_act = bpf_prog_run_xdp(xprog, &xdp);
if (xdp_act != XDP_PASS) {
gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
ctx->total_size += frag_size;
goto finish_ok_pkt;
}
page_info->pad += xdp.data - old_data;
len = xdp.data_end - xdp.data;
u64_stats_update_begin(&rx->statss);
rx->xdp_actions[XDP_PASS]++;
u64_stats_update_end(&rx->statss);
}
skb = gve_rx_skb(priv, rx, page_info, napi, len,
data_slot, is_only_frag); data_slot, is_only_frag);
if (!skb) { if (!skb) {
u64_stats_update_begin(&rx->statss); u64_stats_update_begin(&rx->statss);
...@@ -773,6 +906,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) ...@@ -773,6 +906,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
netdev_features_t feat) netdev_features_t feat)
{ {
u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
u64 xdp_txs = rx->xdp_actions[XDP_TX];
struct gve_rx_ctx *ctx = &rx->ctx; struct gve_rx_ctx *ctx = &rx->ctx;
struct gve_priv *priv = rx->gve; struct gve_priv *priv = rx->gve;
struct gve_rx_cnts cnts = {0}; struct gve_rx_cnts cnts = {0};
...@@ -820,6 +955,12 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, ...@@ -820,6 +955,12 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
u64_stats_update_end(&rx->statss); u64_stats_update_end(&rx->statss);
} }
if (xdp_txs != rx->xdp_actions[XDP_TX])
gve_xdp_tx_flush(priv, rx->q_num);
if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
xdp_do_flush();
/* restock ring slots */ /* restock ring slots */
if (!rx->data.raw_addressing) { if (!rx->data.raw_addressing) {
/* In QPL mode buffs are refilled as the desc are processed */ /* In QPL mode buffs are refilled as the desc are processed */
......
...@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, ...@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (eop && buf_len <= priv->rx_copybreak) { if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0); &buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head)) if (unlikely(!rx->ctx.skb_head))
goto error; goto error;
rx->ctx.skb_tail = rx->ctx.skb_head; rx->ctx.skb_tail = rx->ctx.skb_head;
......
This diff is collapsed.
...@@ -49,10 +49,10 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) ...@@ -49,10 +49,10 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
} }
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len, struct gve_rx_slot_page_info *page_info, u16 len)
u16 padding)
{ {
void *va = page_info->page_address + padding + page_info->page_offset; void *va = page_info->page_address + page_info->page_offset +
page_info->pad;
struct sk_buff *skb; struct sk_buff *skb;
skb = napi_alloc_skb(napi, len); skb = napi_alloc_skb(napi, len);
......
...@@ -18,8 +18,7 @@ void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx); ...@@ -18,8 +18,7 @@ void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx); void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len, struct gve_rx_slot_page_info *page_info, u16 len);
u16 pad);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */ /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info); void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment