Commit 72f299b0 authored by David S. Miller

Merge branch 'ena-fixes'

David Arinzon says:

====================
ENA driver bug fixes
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1573c688 a8ee104f
@@ -2400,9 +2400,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 		return -EOPNOTSUPP;
 	}
 
-	switch (func) {
-	case ENA_ADMIN_TOEPLITZ:
-		if (key) {
+	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
 		if (key_len != sizeof(hash_key->key)) {
 			netdev_err(ena_dev->net_device,
 				   "key len (%u) doesn't equal the supported size (%zu)\n",
@@ -2410,19 +2408,10 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 			return -EINVAL;
 		}
 		memcpy(hash_key->key, key, key_len);
-		rss->hash_init_val = init_val;
 		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
-		}
-		break;
-	case ENA_ADMIN_CRC32:
-		rss->hash_init_val = init_val;
-		break;
-	default:
-		netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
-			   func);
-		return -EINVAL;
 	}
 
+	rss->hash_init_val = init_val;
 	old_func = rss->hash_func;
 	rss->hash_func = func;
 	rc = ena_com_set_hash_function(ena_dev);
......
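Note on the hunk above: the fix hoists rss->hash_init_val = init_val out of the per-function branches, so the initial hash value is now stored even when ENA_ADMIN_TOEPLITZ is selected without a new key (and likewise for ENA_ADMIN_CRC32); unsupported functions are already rejected by the capability check that returns -EOPNOTSUPP earlier in the function. A minimal user-space sketch of the control-flow change, using hypothetical reduced types rather than the real driver structures:

/* Sketch of the new flow: key handling is Toeplitz-specific,
 * storing the initial hash value is not.
 */
#include <stdio.h>
#include <string.h>

enum hash_func { TOEPLITZ, CRC32 };

struct rss_state {
	unsigned int init_val;
	unsigned char key[40];
};

static int fill_hash_function(struct rss_state *rss, enum hash_func func,
			      const unsigned char *key, size_t key_len,
			      unsigned int init_val)
{
	if ((func == TOEPLITZ) && key) {
		if (key_len != sizeof(rss->key))
			return -1;
		memcpy(rss->key, key, key_len);
	}
	/* Set unconditionally: the old switch skipped this when a
	 * Toeplitz function was requested without a key.
	 */
	rss->init_val = init_val;
	return 0;
}

int main(void)
{
	struct rss_state rss = { 0 };

	/* Toeplitz with no key: previously init_val was silently lost */
	fill_hash_function(&rss, TOEPLITZ, NULL, 0, 0x12345678);
	printf("init_val = 0x%x\n", rss.init_val);
	return 0;
}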
@@ -887,11 +887,7 @@ static int ena_set_tunable(struct net_device *netdev,
 	switch (tuna->id) {
 	case ETHTOOL_RX_COPYBREAK:
 		len = *(u32 *)data;
-		if (len > adapter->netdev->mtu) {
-			ret = -EINVAL;
-			break;
-		}
-		adapter->rx_copybreak = len;
+		ret = ena_set_rx_copybreak(adapter, len);
 		break;
 	default:
 		ret = -EINVAL;
......
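The handler now delegates both validation and propagation to ena_set_rx_copybreak(), added in ena_netdev.c later in this diff, instead of only bounding the value by the MTU and storing it on the adapter. From userspace this tunable is exercised with ethtool, for example (eth0 is a placeholder interface name):

ethtool --set-tunable eth0 rx-copybreak 256
ethtool --get-tunable eth0 rx-copybreak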
@@ -374,9 +374,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
 
 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 {
+	u32 verdict = ENA_XDP_PASS;
 	struct bpf_prog *xdp_prog;
 	struct ena_ring *xdp_ring;
-	u32 verdict = XDP_PASS;
 	struct xdp_frame *xdpf;
 	u64 *xdp_stat;
@@ -393,7 +393,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	if (unlikely(!xdpf)) {
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-		verdict = XDP_ABORTED;
+		verdict = ENA_XDP_DROP;
 		break;
 	}
@@ -409,29 +409,35 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 		spin_unlock(&xdp_ring->xdp_tx_lock);
 		xdp_stat = &rx_ring->rx_stats.xdp_tx;
+		verdict = ENA_XDP_TX;
 		break;
 	case XDP_REDIRECT:
 		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
 			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+			verdict = ENA_XDP_REDIRECT;
 			break;
 		}
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-		verdict = XDP_ABORTED;
+		verdict = ENA_XDP_DROP;
 		break;
 	case XDP_ABORTED:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+		verdict = ENA_XDP_DROP;
 		break;
 	case XDP_DROP:
 		xdp_stat = &rx_ring->rx_stats.xdp_drop;
+		verdict = ENA_XDP_DROP;
 		break;
 	case XDP_PASS:
 		xdp_stat = &rx_ring->rx_stats.xdp_pass;
+		verdict = ENA_XDP_PASS;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+		verdict = ENA_XDP_DROP;
 	}
 
 	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
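The verdict rewrite above works because the driver-private ENA_XDP_* values, added in ena_netdev.h at the end of this diff, are distinct single bits, whereas the generic XDP_* action codes are consecutive integers that cannot be accumulated with |= and then tested with a mask. A stand-alone sketch of the bitmask scheme, mirroring the new enum:

#include <stddef.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

enum {
	ENA_XDP_PASS     = 0,
	ENA_XDP_TX       = BIT(0),
	ENA_XDP_REDIRECT = BIT(1),
	ENA_XDP_DROP     = BIT(2),
};

#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)

int main(void)
{
	unsigned int verdicts[] = { ENA_XDP_TX, ENA_XDP_PASS, ENA_XDP_REDIRECT };
	unsigned int xdp_flags = 0;
	size_t i;

	/* accumulate per-packet verdicts, as ena_clean_rx_irq() does */
	for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
		xdp_flags |= verdicts[i];

	/* one flush decision for the whole NAPI batch */
	if (xdp_flags & ENA_XDP_REDIRECT)
		printf("at least one packet redirected: flush maps\n");

	/* forwarded packets (TX or REDIRECT) need their RX buffer unmapped */
	if (verdicts[0] & ENA_XDP_FORWARDED)
		printf("packet 0 was forwarded\n");
	return 0;
}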
@@ -512,16 +518,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
 						 struct bpf_prog *prog,
 						 int first, int count)
 {
+	struct bpf_prog *old_bpf_prog;
 	struct ena_ring *rx_ring;
 	int i = 0;
 
 	for (i = first; i < count; i++) {
 		rx_ring = &adapter->rx_ring[i];
-		xchg(&rx_ring->xdp_bpf_prog, prog);
-		if (prog) {
+		old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+		if (!old_bpf_prog && prog) {
 			ena_xdp_register_rxq_info(rx_ring);
 			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
-		} else {
+		} else if (old_bpf_prog && !prog) {
 			ena_xdp_unregister_rxq_info(rx_ring);
 			rx_ring->rx_headroom = NET_SKB_PAD;
 		}
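xchg() returns the previous program pointer, so the driver can register the rxq info only on a NULL-to-program transition and unregister only on a program-to-NULL transition, rather than on every exchange (a program replacement now touches neither). A user-space analogue using C11 atomics, with simplified hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) current_prog;

static void exchange_prog(void *prog)
{
	void *old_prog = atomic_exchange(&current_prog, prog);

	if (!old_prog && prog)
		printf("first attach: register rxq info\n");
	else if (old_prog && !prog)
		printf("detach: unregister rxq info\n");
	else
		printf("replace or no-op: nothing to (un)register\n");
}

int main(void)
{
	int a, b;

	exchange_prog(&a);	/* NULL -> prog: register   */
	exchange_prog(&b);	/* prog -> prog: replace    */
	exchange_prog(NULL);	/* prog -> NULL: unregister */
	return 0;
}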
@@ -672,6 +680,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
 	ring->ena_dev = adapter->ena_dev;
 	ring->per_napi_packets = 0;
 	ring->cpu = 0;
+	ring->numa_node = 0;
 	ring->no_interrupt_event_cnt = 0;
 	u64_stats_init(&ring->syncp);
 }
@@ -775,6 +784,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	tx_ring->cpu = ena_irq->cpu;
+	tx_ring->numa_node = node;
 	return 0;
 
 err_push_buf_intermediate_buf:
@@ -907,6 +917,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	rx_ring->cpu = ena_irq->cpu;
+	rx_ring->numa_node = node;
 	return 0;
 }
@@ -1619,12 +1630,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	 * we expect, then we simply drop it
 	 */
 	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
-		return XDP_DROP;
+		return ENA_XDP_DROP;
 
 	ret = ena_xdp_execute(rx_ring, xdp);
 
 	/* The xdp program might expand the headers */
-	if (ret == XDP_PASS) {
+	if (ret == ENA_XDP_PASS) {
 		rx_info->page_offset = xdp->data - xdp->data_hard_start;
 		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
 	}
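The recomputation guarded by ENA_XDP_PASS above is needed because an XDP program may move xdp->data (for instance via bpf_xdp_adjust_head()), so offset and length must be rederived from the buffer pointers rather than reused. A stand-alone sketch with a reduced stand-in for struct xdp_buff:

#include <stdio.h>

struct xdp_buff_sketch {	/* reduced stand-in for struct xdp_buff */
	unsigned char *data_hard_start;
	unsigned char *data;
	unsigned char *data_end;
};

int main(void)
{
	unsigned char buf[256];
	struct xdp_buff_sketch xdp = {
		buf,		/* data_hard_start */
		buf + 64,	/* data: 64 bytes of headroom */
		buf + 164,	/* data_end: 100-byte packet */
	};

	/* the program "expands" headers by pulling data back 8 bytes */
	xdp.data -= 8;

	printf("page_offset = %td, len = %td\n",
	       xdp.data - xdp.data_hard_start,	/* 56 */
	       xdp.data_end - xdp.data);	/* 108 */
	return 0;
}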
@@ -1663,7 +1674,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
 
 	do {
-		xdp_verdict = XDP_PASS;
+		xdp_verdict = ENA_XDP_PASS;
 		skb = NULL;
 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
@@ -1691,7 +1702,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
 
 		/* allocate skb and fill it */
-		if (xdp_verdict == XDP_PASS)
+		if (xdp_verdict == ENA_XDP_PASS)
 			skb = ena_rx_skb(rx_ring,
 					 rx_ring->ena_bufs,
 					 ena_rx_ctx.descs,
@@ -1709,14 +1720,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 			/* Packet was passed for transmission, unmap it
 			 * from RX side.
 			 */
-			if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+			if (xdp_verdict & ENA_XDP_FORWARDED) {
 				ena_unmap_rx_buff(rx_ring,
 						  &rx_ring->rx_buffer_info[req_id]);
 				rx_ring->rx_buffer_info[req_id].page = NULL;
 			}
 		}
-		if (xdp_verdict != XDP_PASS) {
+		if (xdp_verdict != ENA_XDP_PASS) {
 			xdp_flags |= xdp_verdict;
+			total_len += ena_rx_ctx.ena_bufs[0].len;
 			res_budget--;
 			continue;
 		}
@@ -1760,7 +1772,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		ena_refill_rx_bufs(rx_ring, refill_required);
 	}
 
-	if (xdp_flags & XDP_REDIRECT)
+	if (xdp_flags & ENA_XDP_REDIRECT)
 		xdp_do_flush_map();
 
 	return work_done;
@@ -1814,8 +1826,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
 				 struct ena_ring *rx_ring)
 {
+	u32 rx_interval = tx_ring->smoothed_interval;
 	struct ena_eth_io_intr_reg intr_reg;
-	u32 rx_interval = 0;
+
 	/* Rx ring can be NULL for XDP tx queues which don't have an
 	 * accompanying rx_ring pair.
 	 */
@@ -1853,19 +1866,26 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
 	if (likely(tx_ring->cpu == cpu))
 		goto out;
 
+	tx_ring->cpu = cpu;
+	if (rx_ring)
+		rx_ring->cpu = cpu;
+
 	numa_node = cpu_to_node(cpu);
+
+	if (likely(tx_ring->numa_node == numa_node))
+		goto out;
+
 	put_cpu();
 
 	if (numa_node != NUMA_NO_NODE) {
 		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
-		if (rx_ring)
+		tx_ring->numa_node = numa_node;
+		if (rx_ring) {
+			rx_ring->numa_node = numa_node;
 			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
 						 numa_node);
+		}
 	}
 
-	tx_ring->cpu = cpu;
-	if (rx_ring)
-		rx_ring->cpu = cpu;
-
 	return;
 out:
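Caching numa_node on the ring lets this hot path skip the device register update whenever the CPU changed but its NUMA node did not; the CPU bookkeeping is also moved before the node check so the rings always track the current CPU. A minimal sketch of the memoization pattern, with hypothetical stand-ins for cpu_to_node() and the register write:

#include <stdio.h>

struct ring { int cpu; int numa_node; };

/* stand-in for cpu_to_node(); assume 8 CPUs per node */
static int cpu_to_node(int cpu) { return cpu / 8; }

/* stand-in for ena_com_update_numa_node(): the costly device write */
static void update_device_numa_hint(int node)
{
	printf("device register write: node %d\n", node);
}

static void update_ring_numa_node(struct ring *r, int cpu)
{
	int node;

	if (r->cpu == cpu)
		return;		/* same CPU: nothing to do */

	r->cpu = cpu;		/* always track the current CPU */
	node = cpu_to_node(cpu);
	if (r->numa_node == node)
		return;		/* node unchanged: skip the register write */

	r->numa_node = node;
	update_device_numa_hint(node);
}

int main(void)
{
	struct ring r = { 0, 0 };

	update_ring_numa_node(&r, 3);	/* still node 0: no write */
	update_ring_numa_node(&r, 9);	/* node 1: one write */
	return 0;
}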
@@ -1987,11 +2007,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 		if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
 			ena_adjust_adaptive_rx_intr_moderation(ena_napi);
 
+		ena_update_ring_numa_node(tx_ring, rx_ring);
 		ena_unmask_interrupt(tx_ring, rx_ring);
 	}
 
-	ena_update_ring_numa_node(tx_ring, rx_ring);
 	ret = rx_work_done;
 } else {
 	ret = budget;
@@ -2376,7 +2395,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
 	ctx.msix_vector = msix_vector;
 	ctx.queue_size = tx_ring->ring_size;
-	ctx.numa_node = cpu_to_node(tx_ring->cpu);
+	ctx.numa_node = tx_ring->numa_node;
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
@@ -2444,7 +2463,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 	ctx.msix_vector = msix_vector;
 	ctx.queue_size = rx_ring->ring_size;
-	ctx.numa_node = cpu_to_node(rx_ring->cpu);
+	ctx.numa_node = rx_ring->numa_node;
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
@@ -2805,6 +2824,24 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
 	return dev_was_up ? ena_up(adapter) : 0;
 }
 
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
+{
+	struct ena_ring *rx_ring;
+	int i;
+
+	if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
+		return -EINVAL;
+
+	adapter->rx_copybreak = rx_copybreak;
+
+	for (i = 0; i < adapter->num_io_queues; i++) {
+		rx_ring = &adapter->rx_ring[i];
+		rx_ring->rx_copybreak = rx_copybreak;
+	}
+
+	return 0;
+}
+
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
 {
 	struct ena_com_dev *ena_dev = adapter->ena_dev;
......
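Two details of ena_set_rx_copybreak() above are worth noting: the bound is min(MTU, ENA_PAGE_SIZE) rather than the MTU alone, and the value is mirrored into every live RX ring, fixing the case where only the adapter-level field was updated while the rings kept a stale value. A user-space sketch of the bound check; min_t is reimplemented here in simplified form (the kernel macro adds type checking), and ENA_PAGE_SIZE is assumed to be 4096 for illustration:

#include <stdint.h>
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint32_t mtu = 9000;		/* jumbo-frame MTU */
	uint32_t page_size = 4096;	/* illustrative ENA_PAGE_SIZE */
	uint32_t rx_copybreak = 512;

	if (rx_copybreak > min_t(uint16_t, mtu, page_size))
		puts("-EINVAL: copybreak larger than min(MTU, page size)");
	else
		puts("accepted: value is mirrored into every RX ring");
	return 0;
}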
@@ -262,8 +262,10 @@ struct ena_ring {
 	bool disable_meta_caching;
 	u16 no_interrupt_event_cnt;
 
-	/* cpu for TPH */
+	/* cpu and NUMA for TPH */
 	int cpu;
+	int numa_node;
 
 	/* number of tx/rx_buffer_info's entries */
 	int ring_size;
@@ -392,6 +394,8 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
 
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
+
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
 static inline void ena_reset_device(struct ena_adapter *adapter,
@@ -409,6 +413,15 @@ enum ena_xdp_errors_t {
 	ENA_XDP_NO_ENOUGH_QUEUES,
 };
 
+enum ENA_XDP_ACTIONS {
+	ENA_XDP_PASS = 0,
+	ENA_XDP_TX = BIT(0),
+	ENA_XDP_REDIRECT = BIT(1),
+	ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
 static inline bool ena_xdp_present(struct ena_adapter *adapter)
 {
 	return !!adapter->xdp_bpf_prog;
......