Commit 689b2bda authored by Arthur Kiyanovski, committed by David S. Miller

net: ena: add functions for handling Low Latency Queues in ena_com

This patch introduces APIs for the detection, initialization, configuration
and actual usage of low latency queues (LLQ). It extends the transmit API
with the creation of LLQ descriptors in device memory (which include the
host buffer descriptors as well as the packet header).
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a7982b8e
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
@@ -973,6 +974,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
/* ena_com_config_dev_mode - Configure the placement policy of the device.
* @ena_dev: ENA communication layer struct
* @llq_features: LLQ feature descriptor, retrieved via
* ena_com_get_dev_attr_feat.
* @llq_default_config: The default driver LLQ parameter configuration.
*/
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_config);
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
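Editorial note: a minimal, hypothetical sketch of how a driver init path might call the new API declared above; the wrapper name is an assumption, and per the comment above, llq_features would normally come from ena_com_get_dev_attr_feat. This is not part of the patch.

static int example_set_placement_policy(struct ena_com_dev *ena_dev,
					struct ena_admin_feature_llq_desc *llq_features,
					struct ena_llq_configurations *llq_default_config)
{
	int rc;

	/* Let ena_com choose between host-memory and LLQ (device-memory)
	 * placement based on the device-reported LLQ features and the
	 * driver's default LLQ configuration.
	 */
	rc = ena_com_config_dev_mode(ena_dev, llq_features, llq_default_config);
	if (rc)
		return rc;

	/* The selected policy is later consumed per submission queue, e.g.
	 * the ENA_ADMIN_PLACEMENT_POLICY_HOST check in
	 * ena_com_sq_have_enough_space() further down in this commit.
	 */
	return 0;
}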
@@ -1082,4 +1093,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
{
u16 size, buffers_num;
u8 *buf;
size = bounce_buf_ctrl->buffer_size;
buffers_num = bounce_buf_ctrl->buffers_num;
buf = bounce_buf_ctrl->base_buffer +
(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
prefetchw(bounce_buf_ctrl->base_buffer +
(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
return buf;
}
#endif /* !(ENA_COM) */
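Editorial note on ena_com_get_next_bounce_buffer() above: the wrap-around relies on buffers_num being a power of two, so the AND with (buffers_num - 1) acts as a cheap modulo, and the prefetchw() warms the buffer the next caller will receive. A small illustrative helper (not driver code) that mirrors just the offset arithmetic:

/* Illustrative only: byte offset of the bounce buffer that slot
 * next_to_use maps to; buffers_num must be a power of two for the
 * mask to behave like a modulo.
 */
static inline u32 bounce_buf_offset(u16 next_to_use, u16 buffers_num,
				    u16 buffer_size)
{
	return (u32)(next_to_use & (buffers_num - 1)) * buffer_size;
}

For example, with buffers_num = 8 and buffer_size = 256 (both illustrative), next_to_use values 7 and 8 map to offsets 1792 and 0, i.e. the ring wraps without a division.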
@@ -94,7 +94,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
writel(intr_reg->intr_control, io_cq->unmask_reg);
}
static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -105,11 +105,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
u16 required_buffers)
{
u16 tail;
int temp;
tail = io_sq->tail;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return ena_com_free_desc(io_sq) >= required_buffers;
/* This calculation doesn't need to be 100% accurate. So to reduce
* the calculation overhead just subtract 2 lines from the free descs
* (one for the header line and one to compensate for the rounding
* down of the division).
*/
temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
return ena_com_free_desc(io_sq) > temp;
}
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
u16 tail = io_sq->tail;
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
......
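Editorial note: to make the LLQ branch of ena_com_sq_have_enough_space() concrete, here is a hypothetical helper that mirrors its arithmetic; the helper and the numbers in the example below are illustrative, not part of the patch.

/* Mirrors the LLQ branch above: free_entries is what ena_com_free_desc()
 * would return, counted in LLQ entries rather than single descriptors.
 */
static inline bool llq_has_room(int free_entries, u16 required_buffers,
				u16 descs_per_entry)
{
	/* +2 pads for the header line and for the division rounding down */
	int needed = required_buffers / descs_per_entry + 2;

	return free_entries > needed;
}

With required_buffers = 17 and descs_per_entry = 4 (both assumed), needed = 17 / 4 + 2 = 6, so the queue is only treated as having space while more than 6 LLQ entries are free.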
@@ -804,12 +804,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
*/
smp_mb();
above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
ENA_TX_WAKEUP_THRESH;
above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH);
if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
ENA_TX_WAKEUP_THRESH;
above_thresh =
ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH);
if (netif_tx_queue_stopped(txq) && above_thresh) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
@@ -1101,7 +1102,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->next_to_clean = next_to_clean;
refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
/* Optimization, try to batch new rx buffers */
@@ -2110,8 +2111,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* to sgl_size + 2. One for the meta descriptor and one for the header
* (if the header is larger than tx_max_header_size).
*/
if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
(tx_ring->sgl_size + 2))) {
if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
tx_ring->sgl_size + 2))) {
netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
__func__, qid);
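Editorial note: a quick illustration of the sgl_size + 2 bound above, using an assumed value (17 is hypothetical, not taken from the driver):

	/* If tx_ring->sgl_size were 17, the queue is stopped unless at least
	 * 17 + 2 = 19 descriptors can still be posted: up to 17 data buffers,
	 * plus 1 TX meta descriptor, plus 1 extra descriptor for a header
	 * that exceeds tx_max_header_size.
	 */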
@@ -2130,8 +2131,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_mb();
if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
> ENA_TX_WAKEUP_THRESH) {
if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.queue_wakeup++;
@@ -2804,7 +2805,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
rx_ring = &adapter->rx_ring[i];
refill_required =
ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
ena_com_free_desc(rx_ring->ena_com_io_sq);
if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring->empty_rx_queue++;
......
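Editorial note on the check_for_empty_rx_ring() condition above: since ena_com_free_desc() returns q_depth - 1 - cnt (one slot is always kept unused), a completely empty RX ring reports exactly ring_size - 1 free descriptors, which is what the comparison detects. Restated with an assumed ring size:

	/* Illustrative: with ring_size = 1024 (assumed) and no buffers posted,
	 * ena_com_free_desc() returns 1024 - 1 - 0 = 1023 == ring_size - 1,
	 * so the ring is flagged as empty and empty_rx_queue is incremented.
	 */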