Commit 7fc2bf78 authored by Praveen Kaligineedi, committed by David S. Miller

gve: Changes to add new TX queues

Changes to enable adding and removing TX queues without calling
gve_close() and gve_open().

Made the following changes:
1) priv->tx, priv->rx and priv->qpls arrays are allocated based on
   max tx queues and max rx queues
2) Changed gve_adminq_create_tx_queues(), gve_adminq_destroy_tx_queues(),
   gve_tx_alloc_rings() and gve_tx_free_rings() functions to add/remove a
   subset of TX queues rather than all the TX queues.
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Jeroen de Borst <jeroendb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2e80aeae
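
To make the intent concrete, here is a minimal sketch of the flow these reworked interfaces enable: resizing the TX queue set in place instead of bouncing the whole device through gve_close()/gve_open(). gve_adjust_tx_queues() is a hypothetical caller, not part of this patch, and it glosses over NAPI and netdev queue bookkeeping; only the gve_* helpers it calls are real.

static int gve_adjust_tx_queues(struct gve_priv *priv, int new_cnt)
{
	int old_cnt = priv->tx_cfg.num_queues;
	int err;

	if (new_cnt > old_cnt) {
		/* Grow: allocate and create only the new rings. */
		err = gve_tx_alloc_rings(priv, old_cnt, new_cnt - old_cnt);
		if (err)
			return err;
		err = gve_adminq_create_tx_queues(priv, old_cnt,
						  new_cnt - old_cnt);
		if (err)
			return err;
	} else if (new_cnt < old_cnt) {
		/* Shrink: destroy and free only the trailing rings. */
		err = gve_adminq_destroy_tx_queues(priv, new_cnt,
						   old_cnt - new_cnt);
		if (err)
			return err;
		gve_tx_free_rings_gqi(priv, new_cnt, old_cnt - new_cnt);
	}
	priv->tx_cfg.num_queues = new_cnt;
	return 0;
}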
@@ -798,16 +798,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
 	return priv->rx_cfg.num_queues;
 }
 
+static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
+{
+	return tx_qid;
+}
+
+static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
+{
+	return priv->tx_cfg.max_queues + rx_qid;
+}
+
+static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
+{
+	return gve_tx_qpl_id(priv, 0);
+}
+
+static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+{
+	return gve_rx_qpl_id(priv, 0);
+}
+
 /* Returns a pointer to the next available tx qpl in the list of qpls
  */
 static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
 {
-	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
-				     priv->qpl_cfg.qpl_map_size);
+	int id = gve_tx_qpl_id(priv, tx_qid);
 
-	/* we are out of tx qpls */
-	if (id >= gve_num_tx_qpls(priv))
+	/* QPL already in use */
+	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
 		return NULL;
 
 	set_bit(id, priv->qpl_cfg.qpl_id_map);
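
The helpers above give every queue a fixed QPL id, so gve_assign_tx_qpl() no longer searches the bitmap for a free slot; it only checks that the queue's own id is not already taken. A worked example, assuming maximums of 16 TX and 8 RX queues:

/* Assumed: priv->tx_cfg.max_queues == 16, priv->rx_cfg.max_queues == 8.
 * TX QPL ids occupy [0, 16) and RX QPL ids occupy [16, 24):
 *
 *	gve_tx_qpl_id(priv, 5)		-> 5
 *	gve_rx_qpl_id(priv, 5)		-> 16 + 5 = 21
 *	gve_tx_start_qpl_id(priv)	-> 0
 *	gve_rx_start_qpl_id(priv)	-> 16
 *
 * Because the mapping depends only on the fixed maximums, a queue keeps
 * the same QPL id regardless of how many queues are currently live.
 */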
@@ -817,14 +836,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
 /* Returns a pointer to the next available rx qpl in the list of qpls
  */
 static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
 {
-	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
-				    priv->qpl_cfg.qpl_map_size,
-				    gve_num_tx_qpls(priv));
+	int id = gve_rx_qpl_id(priv, rx_qid);
 
-	/* we are out of rx qpls */
-	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
+	/* QPL already in use */
+	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
 		return NULL;
 
 	set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -843,7 +860,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
 						      int id)
 {
-	if (id < gve_num_tx_qpls(priv))
+	if (id < gve_rx_start_qpl_id(priv))
 		return DMA_TO_DEVICE;
 	else
 		return DMA_FROM_DEVICE;
@@ -869,8 +886,8 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 /* tx handling */
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv);
-void gve_tx_free_rings_gqi(struct gve_priv *priv);
+int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
+void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
 u32 gve_tx_load_event_counter(struct gve_priv *priv,
 			      struct gve_tx_ring *tx);
 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
...
@@ -516,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 	return gve_adminq_issue_cmd(priv, &cmd);
 }
 
-int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
 {
 	int err;
 	int i;
 
-	for (i = 0; i < num_queues; i++) {
+	for (i = start_id; i < start_id + num_queues; i++) {
 		err = gve_adminq_create_tx_queue(priv, i);
 		if (err)
 			return err;
@@ -604,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
 	return 0;
 }
 
-int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
 {
 	int err;
 	int i;
 
-	for (i = 0; i < num_queues; i++) {
+	for (i = start_id; i < start_id + num_queues; i++) {
 		err = gve_adminq_destroy_tx_queue(priv, i);
 		if (err)
 			return err;
...
@@ -410,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
 					  dma_addr_t db_array_bus_addr,
 					  u32 num_ntfy_blks);
 int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
-int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
-int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
 int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
 int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
 int gve_adminq_register_page_list(struct gve_priv *priv,
...
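
Both prototypes now take a half-open range [start_id, start_id + num_queues), so a caller can target a subset of queues. An illustrative fragment (the queue indices are made up):

	/* Operate on TX queues 4..7 only. */
	err = gve_adminq_create_tx_queues(priv, 4, 4);
	if (err)
		return err;
	/* ... and later tear the same subset down. */
	err = gve_adminq_destroy_tx_queues(priv, 4, 4);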
@@ -584,11 +584,26 @@ static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
 static int gve_register_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int start_id;
 	int err;
 	int i;
 
-	for (i = 0; i < num_qpls; i++) {
+	start_id = gve_tx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
+		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+		if (err) {
+			netif_err(priv, drv, priv->dev,
+				  "failed to register queue page list %d\n",
+				  priv->qpls[i].id);
+			/* This failure will trigger a reset - no need to clean
+			 * up
+			 */
+			return err;
+		}
+	}
+
+	start_id = gve_rx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
 		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -605,11 +620,24 @@ static int gve_register_qpls(struct gve_priv *priv)
 static int gve_unregister_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int start_id;
 	int err;
 	int i;
 
-	for (i = 0; i < num_qpls; i++) {
+	start_id = gve_tx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
+		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+		/* This failure will trigger a reset - no need to clean up */
+		if (err) {
+			netif_err(priv, drv, priv->dev,
+				  "Failed to unregister queue page list %d\n",
+				  priv->qpls[i].id);
+			return err;
+		}
+	}
+
+	start_id = gve_rx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
 		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
 		/* This failure will trigger a reset - no need to clean up */
 		if (err) {
@@ -628,7 +656,7 @@ static int gve_create_rings(struct gve_priv *priv)
 	int err;
 	int i;
 
-	err = gve_adminq_create_tx_queues(priv, num_tx_queues);
+	err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
 	if (err) {
 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
 			  num_tx_queues);
@@ -695,10 +723,10 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
 	}
 }
 
-static void gve_tx_free_rings(struct gve_priv *priv)
+static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
 {
 	if (gve_is_gqi(priv)) {
-		gve_tx_free_rings_gqi(priv);
+		gve_tx_free_rings_gqi(priv, start_id, num_rings);
 	} else {
 		gve_tx_free_rings_dqo(priv);
 	}
@@ -709,20 +737,20 @@ static int gve_alloc_rings(struct gve_priv *priv)
 	int err;
 
 	/* Setup tx rings */
-	priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
+	priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
 			    GFP_KERNEL);
 	if (!priv->tx)
 		return -ENOMEM;
 
 	if (gve_is_gqi(priv))
-		err = gve_tx_alloc_rings(priv);
+		err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
 	else
 		err = gve_tx_alloc_rings_dqo(priv);
 	if (err)
 		goto free_tx;
 
 	/* Setup rx rings */
-	priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
+	priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
 			    GFP_KERNEL);
 	if (!priv->rx) {
 		err = -ENOMEM;
@@ -747,7 +775,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
 	kvfree(priv->rx);
 	priv->rx = NULL;
 free_tx_queue:
-	gve_tx_free_rings(priv);
+	gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
 free_tx:
 	kvfree(priv->tx);
 	priv->tx = NULL;
@@ -759,7 +787,7 @@ static int gve_destroy_rings(struct gve_priv *priv)
 	int num_tx_queues = gve_num_tx_queues(priv);
 	int err;
 
-	err = gve_adminq_destroy_tx_queues(priv, num_tx_queues);
+	err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
 	if (err) {
 		netif_err(priv, drv, priv->dev,
 			  "failed to destroy tx queues\n");
@@ -797,7 +825,7 @@ static void gve_free_rings(struct gve_priv *priv)
 		ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
 		gve_remove_napi(priv, ntfy_idx);
 	}
-	gve_tx_free_rings(priv);
+	gve_tx_free_rings(priv, 0, num_tx_queues);
 	kvfree(priv->tx);
 	priv->tx = NULL;
 }
@@ -894,40 +922,46 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
 			  qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
 
 	kvfree(qpl->page_buses);
+	qpl->page_buses = NULL;
 free_pages:
 	kvfree(qpl->pages);
+	qpl->pages = NULL;
 	priv->num_registered_pages -= qpl->num_entries;
 }
 
 static int gve_alloc_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+	int start_id;
 	int i, j;
 	int err;
 
-	if (num_qpls == 0)
+	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
 		return 0;
 
-	priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
+	priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
 	if (!priv->qpls)
 		return -ENOMEM;
 
-	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
+	start_id = gve_tx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
 		err = gve_alloc_queue_page_list(priv, i,
 						priv->tx_pages_per_qpl);
 		if (err)
 			goto free_qpls;
 	}
-	for (; i < num_qpls; i++) {
+
+	start_id = gve_rx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
 		err = gve_alloc_queue_page_list(priv, i,
 						priv->rx_data_slot_cnt);
 		if (err)
 			goto free_qpls;
 	}
 
-	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
+	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
 				     sizeof(unsigned long) * BITS_PER_BYTE;
-	priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
+	priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
 					    sizeof(unsigned long), GFP_KERNEL);
 	if (!priv->qpl_cfg.qpl_id_map) {
 		err = -ENOMEM;
@@ -940,23 +974,26 @@ static int gve_alloc_qpls(struct gve_priv *priv)
 	for (j = 0; j <= i; j++)
 		gve_free_queue_page_list(priv, j);
 	kvfree(priv->qpls);
+	priv->qpls = NULL;
 	return err;
 }
 
 static void gve_free_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
 	int i;
 
-	if (num_qpls == 0)
+	if (!priv->qpls)
 		return;
 
 	kvfree(priv->qpl_cfg.qpl_id_map);
+	priv->qpl_cfg.qpl_id_map = NULL;
 
-	for (i = 0; i < num_qpls; i++)
+	for (i = 0; i < max_queues; i++)
 		gve_free_queue_page_list(priv, i);
 
 	kvfree(priv->qpls);
+	priv->qpls = NULL;
 }
 
 /* Use this to schedule a reset when the device is capable of continuing
...
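
A side effect worth noting, shown in a small illustration that is not from the patch: because the freed pointers are reset to NULL and gve_free_qpls() now keys off priv->qpls rather than a queue count, the teardown path is safe to reach more than once.

	gve_free_qpls(priv);
	gve_free_qpls(priv);	/* returns early: priv->qpls is NULL */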
@@ -124,7 +124,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
 		return -ENOMEM;
 
 	if (!rx->data.raw_addressing) {
-		rx->data.qpl = gve_assign_rx_qpl(priv);
+		rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
 		if (!rx->data.qpl) {
 			kvfree(rx->data.page_info);
 			rx->data.page_info = NULL;
...
@@ -195,7 +195,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
 	tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
 	tx->dev = &priv->pdev->dev;
 	if (!tx->raw_addressing) {
-		tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
+		tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
 		if (!tx->tx_fifo.qpl)
 			goto abort_with_desc;
 		/* map Tx FIFO */
@@ -233,12 +233,12 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
 	return -ENOMEM;
 }
 
-int gve_tx_alloc_rings(struct gve_priv *priv)
+int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
 {
 	int err = 0;
 	int i;
 
-	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+	for (i = start_id; i < start_id + num_rings; i++) {
 		err = gve_tx_alloc_ring(priv, i);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -251,17 +251,17 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
 	if (err) {
 		int j;
 
-		for (j = 0; j < i; j++)
+		for (j = start_id; j < i; j++)
 			gve_tx_free_ring(priv, j);
 	}
 	return err;
 }
 
-void gve_tx_free_rings_gqi(struct gve_priv *priv)
+void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
 {
 	int i;
 
-	for (i = 0; i < priv->tx_cfg.num_queues; i++)
+	for (i = start_id; i < start_id + num_rings; i++)
 		gve_tx_free_ring(priv, i);
 }
...
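
Note that the unwind loop in gve_tx_alloc_rings() now starts at start_id, so a partial failure releases only the rings this call created. An illustration with hypothetical counts:

	/* Growing from 8 to 12 TX rings: if ring 10 fails to allocate,
	 * only rings 8 and 9 are freed on the error path; the 8 rings
	 * already live are untouched.
	 */
	err = gve_tx_alloc_rings(priv, 8, 4);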