Commit 688dc536 authored by David S. Miller

Merge branch 'mlx4-next'

Tariq Toukan says:

====================
mlx4 misc cleanups and improvements

This patchset contains cleanups and improvements from the team to the
mlx4 Ethernet and core drivers.

Series generated against net-next commit:
5a7a5555 'net sched: stylistic cleanups'
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c3d9b9f3 a7e1f049
@@ -785,17 +785,23 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
 
 	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+		int ret;
+
 		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 			return mlx4_internal_err_ret_value(dev, op,
 							  op_modifier);
+		down_read(&mlx4_priv(dev)->cmd.switch_sem);
 		if (mlx4_priv(dev)->cmd.use_events)
-			return mlx4_cmd_wait(dev, in_param, out_param,
-					     out_is_imm, in_modifier,
-					     op_modifier, op, timeout);
+			ret = mlx4_cmd_wait(dev, in_param, out_param,
+					    out_is_imm, in_modifier,
+					    op_modifier, op, timeout);
 		else
-			return mlx4_cmd_poll(dev, in_param, out_param,
-					     out_is_imm, in_modifier,
-					     op_modifier, op, timeout);
+			ret = mlx4_cmd_poll(dev, in_param, out_param,
+					    out_is_imm, in_modifier,
+					    op_modifier, op, timeout);
+		up_read(&mlx4_priv(dev)->cmd.switch_sem);
+		return ret;
 	}
 	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
 			      in_modifier, op_modifier, op, timeout);
@@ -2454,6 +2460,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 	int flags = 0;
 
 	if (!priv->cmd.initialized) {
+		init_rwsem(&priv->cmd.switch_sem);
 		mutex_init(&priv->cmd.slave_cmd_mutex);
 		sema_init(&priv->cmd.poll_sem, 1);
 		priv->cmd.use_events = 0;
@@ -2583,6 +2590,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 	if (!priv->cmd.context)
 		return -ENOMEM;
 
+	down_write(&priv->cmd.switch_sem);
 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
 		priv->cmd.context[i].token = i;
 		priv->cmd.context[i].next = i + 1;
@@ -2606,6 +2614,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 	down(&priv->cmd.poll_sem);
 	priv->cmd.use_events = 1;
+	up_write(&priv->cmd.switch_sem);
 
 	return err;
 }
@@ -2618,6 +2627,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
+	down_write(&priv->cmd.switch_sem);
 	priv->cmd.use_events = 0;
 
 	for (i = 0; i < priv->cmd.max_cmds; ++i)
@@ -2626,6 +2636,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
 	kfree(priv->cmd.context);
 
 	up(&priv->cmd.poll_sem);
+	up_write(&priv->cmd.switch_sem);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
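The cmd.c hunks above all serve a single change: every command issuer now takes the new switch_sem for read around the wait/poll dispatch, while mlx4_cmd_use_events() and mlx4_cmd_use_polling() take it for write, so an event/polling mode switch can never race with an in-flight command. A minimal userspace sketch of the same reader/writer discipline, using a POSIX pthread_rwlock_t in place of the kernel rw_semaphore (function names and return values here are illustrative, not taken from the driver):

```c
/* Sketch only; build with: gcc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t switch_lock = PTHREAD_RWLOCK_INITIALIZER;
static int use_events;	/* 0 = polling mode, 1 = event mode */

/* Command path (read side): many issuers may run concurrently. */
static int issue_command(int arg)
{
	int ret;

	pthread_rwlock_rdlock(&switch_lock);
	if (use_events)
		ret = arg;	/* stand-in for the mlx4_cmd_wait() leg */
	else
		ret = -arg;	/* stand-in for the mlx4_cmd_poll() leg */
	pthread_rwlock_unlock(&switch_lock);
	return ret;
}

/* Mode switch (write side): blocks until no command is in flight. */
static void set_event_mode(int on)
{
	pthread_rwlock_wrlock(&switch_lock);
	use_events = on;
	pthread_rwlock_unlock(&switch_lock);
}

int main(void)
{
	set_event_mode(1);
	printf("cmd returned %d\n", issue_command(42));
	return 0;
}
```

A reader/writer lock fits better than a mutex here because the common operation (issuing a command) only needs shared access; exclusive access is required only for the rare mode transition.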
......
@@ -72,7 +72,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 	}
 	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
 			   frag_info->dma_dir);
-	if (dma_mapping_error(priv->ddev, dma)) {
+	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
 		put_page(page);
 		return -ENOMEM;
 	}
@@ -108,7 +108,8 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 		    ring_alloc[i].page_size)
 			continue;
 
-		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
+		if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
+					      frag_info, gfp)))
 			goto out;
 	}
@@ -585,7 +586,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 		frag_info = &priv->frag_info[nr];
 		if (length <= frag_info->frag_prefix_size)
 			break;
-		if (!frags[nr].page)
+		if (unlikely(!frags[nr].page))
 			goto fail;
 
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
@@ -625,7 +626,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 	dma_addr_t dma;
 
 	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
-	if (!skb) {
+	if (unlikely(!skb)) {
 		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
 		return NULL;
 	}
@@ -736,7 +737,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 {
 	__wsum csum_pseudo_hdr = 0;
 
-	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
+		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
 		return -1;
 	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
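For context on the csum_add() call above: IP-style checksums are 16-bit one's-complement sums, so combining two partial sums means folding any carry bit back into the result. A small standalone version of that fold (the carry trick matches the kernel's generic csum_add() in include/net/checksum.h; the plain-uint32_t wrapper here is just for demonstration):

```c
#include <stdint.h>
#include <stdio.h>

/* One's-complement accumulate: add, then re-add the carry out of
 * the top bit so the sum stays a valid one's-complement value. */
static uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	return res + (res < addend);	/* (res < addend) == carry bit */
}

int main(void)
{
	/* 0xffffffff + 2 wraps to 1; folding the carry yields 2, which
	 * is correct since 0xffffffff is one's-complement zero. */
	printf("0x%08x\n", csum_add32(0xffffffffu, 2u));
	return 0;
}
```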
@@ -769,7 +771,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
-		if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
+		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
 			return -1;
 #endif
 	return 0;
@@ -796,10 +798,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 	u64 timestamp;
 	bool l2_tunnel;
 
-	if (!priv->port_up)
+	if (unlikely(!priv->port_up))
 		return 0;
 
-	if (budget <= 0)
+	if (unlikely(budget <= 0))
 		return polled;
 
 	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
@@ -902,9 +904,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 		case XDP_PASS:
 			break;
 		case XDP_TX:
-			if (!mlx4_en_xmit_frame(frags, dev,
-						length, tx_index,
-						&doorbell_pending))
+			if (likely(!mlx4_en_xmit_frame(frags, dev,
+						       length, tx_index,
+						       &doorbell_pending)))
 				goto consumed;
 			goto xdp_drop; /* Drop on xmit failure */
 		default:
@@ -912,7 +914,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 		case XDP_ABORTED:
 		case XDP_DROP:
 xdp_drop:
-			if (mlx4_en_rx_recycle(ring, frags))
+			if (likely(mlx4_en_rx_recycle(ring, frags)))
 				goto consumed;
 			goto next;
 		}
@@ -1016,12 +1018,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 		/* GRO not possible, complete processing here */
 		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
-		if (!skb) {
+		if (unlikely(!skb)) {
 			ring->dropped++;
 			goto next;
 		}
 
 		if (unlikely(priv->validate_loopback)) {
 			validate_loopback(priv, skb);
 			goto next;
 		}
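All of the en_rx.c hunks share one theme: error checks gain unlikely() and the XDP transmit success test gains likely(), steering the compiler to lay the hot RX path out as a straight fall-through with error handling moved out of line. The macros are thin wrappers around a GCC builtin; a self-contained sketch (the macro bodies mirror include/linux/compiler.h, the demo function is made up):

```c
#include <stdio.h>

/* Essentially as defined in include/linux/compiler.h */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* The hint tells GCC/Clang which successor block to place on the
 * fall-through path, keeping the common case dense in the I-cache. */
static int process(const char *pkt, int len)
{
	if (unlikely(!pkt || len <= 0))
		return -1;	/* rare error leg, emitted out of line */
	return len;		/* hot path falls straight through */
}

int main(void)
{
	printf("%d\n", process("abc", 3));
	return 0;
}
```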
......
@@ -46,6 +46,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <net/devlink.h>
+#include <linux/rwsem.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
@@ -627,6 +628,7 @@ struct mlx4_cmd {
 	struct mutex		slave_cmd_mutex;
 	struct semaphore	poll_sem;
 	struct semaphore	event_sem;
+	struct rw_semaphore	switch_sem;
 	int			max_cmds;
 	spinlock_t		context_lock;
 	int			free_head;
......
@@ -45,15 +45,12 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
 	struct mlx4_srq *srq;
 
-	spin_lock(&srq_table->lock);
-
+	rcu_read_lock();
 	srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+	rcu_read_unlock();
 	if (srq)
 		atomic_inc(&srq->refcount);
-
-	spin_unlock(&srq_table->lock);
-
-	if (!srq) {
+	else {
 		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
 		return;
 	}
@@ -301,12 +298,11 @@ struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
 {
 	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
 	struct mlx4_srq *srq;
-	unsigned long flags;
 
-	spin_lock_irqsave(&srq_table->lock, flags);
+	rcu_read_lock();
 	srq = radix_tree_lookup(&srq_table->tree,
 				srqn & (dev->caps.num_srqs - 1));
-	spin_unlock_irqrestore(&srq_table->lock, flags);
+	rcu_read_unlock();
 
 	return srq;
 }
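The srq.c change replaces spinlock-protected radix tree lookups with RCU read-side critical sections, making SRQ lookups lock-free on the fast path; the cost moves to the update side, which must defer freeing an entry until a grace period has passed. As an illustration only, here is the same read/publish/retire discipline in userspace with liburcu, protecting a single pointer rather than the driver's radix tree (all names hypothetical):

```c
/* Sketch only; build with: gcc demo.c -lurcu (requires liburcu). */
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int srqn;
};

static struct entry *current_entry;	/* RCU-protected pointer */

/* Read side: analogous to the new mlx4_srq_lookup(). */
static int lookup(void)
{
	struct entry *e;
	int srqn = -1;

	rcu_read_lock();
	e = rcu_dereference(current_entry);
	if (e)
		srqn = e->srqn;
	rcu_read_unlock();
	return srqn;
}

/* Update side (single writer): publish the new entry, then free the
 * old one only after every pre-existing reader has finished. */
static void replace(int srqn)
{
	struct entry *new = malloc(sizeof(*new));
	struct entry *old = current_entry;

	new->srqn = srqn;
	rcu_assign_pointer(current_entry, new);
	synchronize_rcu();	/* grace period: readers of 'old' drain */
	free(old);
}

int main(void)
{
	rcu_register_thread();	/* liburcu: each reader thread registers */
	replace(7);
	printf("srqn = %d\n", lookup());
	rcu_unregister_thread();
	return 0;
}
```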
......