Commit 37dff155 authored by Netanel Belgazal, committed by David S. Miller

net: ena: fix incorrect usage of memory barriers

Added memory barriers where they were missing to support multiple
architectures, and removed redundant ones.

As part of removing the redundant memory barriers and improving
performance, we moved to more relaxed versions of memory barriers,
as well as to the more relaxed version of writel - writel_relaxed,
while maintaining correctness.
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 28abf4e9
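
For context, below is a minimal sketch (not part of the commit) of the descriptor phase-bit handshake that all of the barriers in this diff protect. The hyp_* names are hypothetical, and on real hardware only the device plays the producer role:

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <asm/barrier.h>	/* dma_rmb(), dma_wmb() */

struct hyp_desc {
	u32 payload;
	u8 flags;			/* bit 0: phase (ownership) bit */
};

/* Producer side, modelled in software (on ENA the device does this):
 * publish the descriptor body first, flip the phase bit last.
 */
static void hyp_publish(struct hyp_desc *d, u32 payload, u8 phase)
{
	d->payload = payload;		/* body first */
	dma_wmb();			/* body visible before ownership */
	WRITE_ONCE(d->flags, phase);	/* hand over ownership last */
}

/* Consumer side: check ownership first and read the body only after
 * a read barrier, otherwise a stale payload may be observed.
 */
static bool hyp_reap(struct hyp_desc *d, u8 expected_phase, u32 *out)
{
	if ((READ_ONCE(d->flags) & 0x1) != expected_phase)
		return false;		/* still owned by the producer */

	dma_rmb();			/* phase load before payload loads */
	*out = READ_ONCE(d->payload);
	return true;
}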
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -464,7 +464,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
 		/* Do not read the rest of the completion entry before the
 		 * phase bit was validated
 		 */
-		rmb();
+		dma_rmb();
 		ena_com_handle_single_admin_completion(admin_queue, cqe);
 
 		head_masked++;
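
Both the phase bit and the completion entry live in DMA-coherent host memory, and no MMIO read sits between the two loads, so the lighter dma_rmb() is sufficient here; the full rmb() additionally orders reads from I/O space. An illustrative contrast, with a hypothetical helper and placeholder arguments:

static u32 hyp_mixed_reads(void __iomem *reg, const u32 *coherent_buf)
{
	u32 status = readl(reg);		/* MMIO read */
	u32 head = READ_ONCE(coherent_buf[0]);	/* coherent-DMA read */

	/* rmb() would order both loads above against loads below;
	 * dma_rmb() orders only the coherent-memory load, which is
	 * all the admin-completion loop needs.
	 */
	dma_rmb();

	return status + head + READ_ONCE(coherent_buf[1]);
}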
@@ -627,15 +627,8 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 	mmio_read_reg |= mmio_read->seq_num &
 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-	/* make sure read_resp->req_id get updated before the hw can write
-	 * there
-	 */
-	wmb();
-
-	writel_relaxed(mmio_read_reg,
-		       ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-	mmiowb();
 	for (i = 0; i < timeout; i++) {
 		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
 			break;
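
The removed sequence assembled the required ordering by hand around the relaxed accessor. writel() already guarantees that prior writes to coherent memory reach the device before the MMIO store, so a single call expresses the same guarantee. A hypothetical sketch (names and pointers are placeholders):

/* `db` points into a BAR mapping, `shadow` into coherent memory. */
static void hyp_trigger(u32 __iomem *db, u32 *shadow, u32 val)
{
	*shadow = val;		/* coherent write the device will read */

	/* Removed form: wmb(); writel_relaxed(val, db); mmiowb();
	 * New form: writel() embeds the write barrier, so the shadow
	 * write above is visible to the device before the MMIO store.
	 */
	writel(val, db);
}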
@@ -1798,6 +1791,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
 	/* Go over all the events */
 	while ((READ_ONCE(aenq_common->flags) &
 		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+		/* Make sure the phase bit (ownership) is as expected before
+		 * reading the rest of the descriptor.
+		 */
+		dma_rmb();
+
 		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
 			 aenq_common->group, aenq_common->syndrom,
 			 (u64)aenq_common->timestamp_low +
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	if (desc_phase != expected_phase)
 		return NULL;
 
+	/* Make sure we read the rest of the descriptor after the phase bit
+	 * has been read
+	 */
+	dma_rmb();
+
 	return cdesc;
 }
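
From the caller's side, a sketch reconstructed from context (not code from this commit): because the helper now issues dma_rmb() after validating the phase bit, fields read from the returned descriptor are guaranteed to be the device's newly written values:

static int hyp_rx_poll_one(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (!cdesc)
		return 0;	/* no new completion yet */

	/* Safe without further barriers: ordered after the phase-bit
	 * check by the dma_rmb() inside the helper.
	 */
	return cdesc->length;
}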
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
 	if (cdesc_phase != expected_phase)
 		return -EAGAIN;
 
+	dma_rmb();
 	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
 		pr_err("Invalid req id %d\n", cdesc->req_id);
 		return -EINVAL;
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
 	return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
-					    bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
 	u16 tail;
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
 	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
 		 io_sq->qid, tail);
 
-	if (relaxed)
-		writel_relaxed(tail, io_sq->db_addr);
-	else
-		writel(tail, io_sq->db_addr);
+	writel(tail, io_sq->db_addr);
 
 	return 0;
 }
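
With the relaxed variant gone, the writel() inside this helper supplies the producer-side barrier, which is exactly what the ena_netdev.c hunks below rely on when dropping their explicit wmb() before ringing the doorbell. A usage excerpt (sketch; error handling trimmed, surrounding declarations assumed):

	/* Fill TX descriptors in coherent memory, then ring the
	 * doorbell; no wmb() needed at the call site any more.
	 */
	rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
	if (unlikely(rc))
		return rc;

	ena_com_write_sq_doorbell(io_sq);	/* writel() orders the above */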
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 			  rx_ring->qid, i, num);
 	}
 
-	if (likely(i)) {
-		/* Add memory barrier to make sure the desc were written before
-		 * issue a doorbell
-		 */
-		wmb();
-		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
-		mmiowb();
-	}
+	/* ena_com_write_sq_doorbell issues a wmb() */
+	if (likely(i))
+		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
 
 	rx_ring->next_to_use = next_to_use;
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
 		tx_ring->ring_size);
 
-	/* This WMB is aimed to:
-	 * 1 - perform smp barrier before reading next_to_completion
-	 * 2 - make sure the desc were written before trigger DB
-	 */
-	wmb();
-
 	/* stop the queue when no more space available, the packet can have up
 	 * to sgl_size + 2. one for the meta descriptor and one for header
 	 * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * stop the queue but meanwhile clean_tx_irq updates
 	 * next_to_completion and terminates.
 	 * The queue will remain stopped forever.
-	 * To solve this issue this function perform rmb, check
-	 * the wakeup condition and wake up the queue if needed.
+	 * To solve this issue add a mb() to make sure that
+	 * netif_tx_stop_queue() write is vissible before checking if
+	 * there is additional space in the queue.
 	 */
-	smp_rmb();
+	smp_mb();
 
 	if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
 	    > ENA_TX_WAKEUP_THRESH) {
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
-		/* trigger the dma engine */
-		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+		/* trigger the dma engine. ena_com_write_sq_doorbell()
+		 * has a mb
+		 */
+		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
 		u64_stats_update_begin(&tx_ring->syncp);
 		tx_ring->tx_stats.doorbells++;
 		u64_stats_update_end(&tx_ring->syncp);
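
The smp_mb() hunk above closes a classic stop/wake race between the transmit path and the completion path. Paraphrased below (simplified and reconstructed from context, not the literal commit code) is the sequence the barrier protects:

	if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
		     tx_ring->sgl_size + 2)) {
		netif_tx_stop_queue(txq);

		/* Full barrier: make the stop-queue store visible to
		 * the completion path before re-reading the free
		 * space; the old smp_rmb() ordered only reads, so
		 * both sides could miss each other and leave the
		 * queue stopped forever.
		 */
		smp_mb();

		if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		    ENA_TX_WAKEUP_THRESH)
			netif_tx_wake_queue(txq);
	}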