Commit 6d293026 authored by David S. Miller

Merge tag 'mlx5-updates-2020-06-23' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 updates 2020-06-23

This series adds miscellaneous cleanups and updates to the mlx5 driver.

1) Misc updates and cleanups
2) Use RCU instead of a spinlock for the vxlan table

v1->v2:
 - Removed unnecessary Fixes tags

v2->v3:
 - Dropped the "macro undefine" patch; it has no value

v3->v4:
 - Dropped the Relaxed Ordering patch.
====================
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c6d5d843 efbb974d
@@ -676,7 +676,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 	block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
 	start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;

-	/* Copy the block to local buffer to avoid HW override while being processed*/
+	/* Copy the block to local buffer to avoid HW override while being processed */
 	memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
 	       TRACER_BLOCK_SIZE_BYTE);
......
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <net/netevent.h>
+#include <net/arp.h>
 #include "neigh.h"
 #include "tc.h"
 #include "en_rep.h"
......
@@ -215,16 +215,3 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
 	return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
 		      mlx5e_xsk_disable_umem(priv, ix);
 }
-
-u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk)
-{
-	u16 res = xsk->refcnt ? params->num_channels : 0;
-
-	while (res) {
-		if (mlx5e_xsk_get_umem(params, xsk, res - 1))
-			break;
-		--res;
-	}
-
-	return res;
-}
@@ -26,6 +26,4 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
 int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
-
-u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk);
 #endif /* __MLX5_EN_XSK_UMEM_H__ */
@@ -220,7 +220,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
 			sizeof(*ft->g), GFP_KERNEL);
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in || !ft->g) {
-		kvfree(ft->g);
+		kfree(ft->g);
 		kvfree(in);
 		return -ENOMEM;
 	}
......
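Note on the arfs_create_groups() hunk above: ft->g comes from kcalloc() (slab memory, released with kfree()), while in comes from kvzalloc() (slab with a vmalloc fallback, released with kvfree()), so the error path now pairs each buffer with the free routine that matches its allocator. A minimal stand-alone sketch of the same pairing; the function and sizes below are illustrative, not driver code:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical helper, not part of the driver: one slab buffer and one
 * possibly-vmalloc'ed buffer, each freed with the matching routine. */
static int example_alloc_pair(size_t ngroups, size_t inlen)
{
        void *groups = kcalloc(ngroups, sizeof(int), GFP_KERNEL); /* slab only */
        void *in = kvzalloc(inlen, GFP_KERNEL);                   /* slab or vmalloc */

        if (!groups || !in) {
                kfree(groups);  /* kcalloc()/kzalloc() memory -> kfree(); NULL is a no-op */
                kvfree(in);     /* kvzalloc() memory -> kvfree(); handles both backends */
                return -ENOMEM;
        }

        /* ... use the buffers ... */

        kvfree(in);
        kfree(groups);
        return 0;
}

kvfree() would also accept slab memory (it falls back to kfree() for non-vmalloc addresses), but using kfree() for a plain kcalloc() allocation keeps the pairing obvious.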
@@ -35,7 +35,6 @@
 #include <net/switchdev.h>
 #include <net/pkt_cls.h>
 #include <net/act_api.h>
-#include <net/arp.h>
 #include <net/devlink.h>
 #include <net/ipv6_stubs.h>
......
@@ -40,6 +40,14 @@
 #ifdef CONFIG_MLX5_ESWITCH
+
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
+
+struct mlx5e_tc_update_priv {
+        struct net_device *tun_dev;
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)

 struct tunnel_match_key {
        struct flow_dissector_key_control enc_control;
        struct flow_dissector_key_keyid enc_key_id;
@@ -114,8 +122,6 @@ void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_l
 struct mlx5e_neigh_hash_entry;
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);

-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
-
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);

 enum mlx5e_tc_attr_to_reg {
@@ -142,10 +148,6 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
                                     struct net_device *out_dev);

-struct mlx5e_tc_update_priv {
-        struct net_device *tun_dev;
-};
-
 struct mlx5e_tc_mod_hdr_acts {
        int num_actions;
        int max_actions;
@@ -174,8 +176,6 @@ void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
                             struct flow_match_basic *match, bool outer,
                             void *headers_c, void *headers_v);

-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
......
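The en_tc.h hunks above appear to hoist mlx5e_tc_num_filters() and struct mlx5e_tc_update_priv out of the region guarded by CONFIG_MLX5_CLS_ACT (and start that guard earlier), the usual way to keep declarations visible to code that builds even when the option is off. A generic sketch of the IS_ENABLED() pattern, using a hypothetical option and function name rather than the driver's:

#include <linux/kconfig.h>

/* Illustrative only: IS_ENABLED(CONFIG_FOO) evaluates to 1 when CONFIG_FOO=y
 * or =m, so the real declaration is used when the feature is built and a
 * static inline stub is used otherwise, keeping callers free of #ifdefs. */
#if IS_ENABLED(CONFIG_FOO)
int foo_init(void);
#else
static inline int foo_init(void) { return 0; }
#endif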
@@ -44,16 +44,6 @@
 #include "lib/mpfs.h"
 #include "en/tc_ct.h"

-#define FDB_TC_MAX_CHAIN 3
-#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
-#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
-
-/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
-#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
-
-#define FDB_TC_MAX_PRIO 16
-#define FDB_TC_LEVELS_PER_PRIO 2
-
 #ifdef CONFIG_MLX5_ESWITCH
 #define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
......
@@ -41,7 +41,6 @@
 #include "diag/fs_tracepoint.h"
 #include "accel/ipsec.h"
 #include "fpga/ipsec.h"
-#include "eswitch.h"

 #define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
 					 sizeof(struct init_tree_node))
......
@@ -39,6 +39,16 @@
 #include <linux/llist.h>
 #include <steering/fs_dr.h>

+#define FDB_TC_MAX_CHAIN 3
+#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
+#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
+
+/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
+#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+
+#define FDB_TC_MAX_PRIO 16
+#define FDB_TC_LEVELS_PER_PRIO 2
+
 struct mlx5_modify_hdr {
        enum mlx5_flow_namespace_type ns_type;
        union {
......
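For orientation, the chain layout encoded by the FDB_* defines (removed from the eswitch header above and added to the flow steering header here) works out to chains 0..3 for TC, FDB_FT_CHAIN = 4 as the last "real" chain, FDB_TC_SLOW_PATH_CHAIN = 5 right above it, and FDB_NUM_CHAINS = 5 because chain zero counts as well. A stand-alone restatement of that arithmetic (values copied from the hunk; the assertion is only illustrative, not driver code):

#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)             /* 4 */
#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)       /* 5 */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)               /* 5: chains 0..4 */

_Static_assert(FDB_NUM_CHAINS == FDB_TC_SLOW_PATH_CHAIN,
               "the slow-path chain sits directly above the last real chain");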
@@ -40,7 +40,6 @@
 struct mlx5_vxlan {
        struct mlx5_core_dev *mdev;
-       spinlock_t lock; /* protect vxlan table */
        /* max_num_ports is usuallly 4, 16 buckets is more than enough */
        DECLARE_HASHTABLE(htable, 4);
        int num_ports;
@@ -78,45 +77,47 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
        return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in);
 }

-static struct mlx5_vxlan_port*
-mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
+bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;
+       bool found = false;

-       hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
-               if (vxlanp->udp_port == port)
-                       return vxlanp;
-       }
+       if (!mlx5_vxlan_allowed(vxlan))
+               return NULL;

-       return NULL;
+       rcu_read_lock();
+       hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port)
+               if (vxlanp->udp_port == port) {
+                       found = true;
+                       break;
+               }
+       rcu_read_unlock();
+
+       return found;
 }

-struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
+static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;

-       if (!mlx5_vxlan_allowed(vxlan))
-               return NULL;
-
-       spin_lock_bh(&vxlan->lock);
-       vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
-       spin_unlock_bh(&vxlan->lock);
-
-       return vxlanp;
+       hash_for_each_possible(vxlan->htable, vxlanp, hlist, port)
+               if (vxlanp->udp_port == port)
+                       return vxlanp;
+       return NULL;
 }

 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;
-       int ret = -ENOSPC;
+       int ret = 0;

-       vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
+       mutex_lock(&vxlan->sync_lock);
+       vxlanp = vxlan_lookup_port(vxlan, port);
        if (vxlanp) {
                refcount_inc(&vxlanp->refcount);
-               return 0;
+               goto unlock;
        }

-       mutex_lock(&vxlan->sync_lock);
        if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
                mlx5_core_info(vxlan->mdev,
                               "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
@@ -138,9 +139,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
        vxlanp->udp_port = port;
        refcount_set(&vxlanp->refcount, 1);

-       spin_lock_bh(&vxlan->lock);
-       hash_add(vxlan->htable, &vxlanp->hlist, port);
-       spin_unlock_bh(&vxlan->lock);
+       hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);

        vxlan->num_ports++;
        mutex_unlock(&vxlan->sync_lock);
@@ -157,34 +156,26 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;
-       bool remove = false;
        int ret = 0;

        mutex_lock(&vxlan->sync_lock);

-       spin_lock_bh(&vxlan->lock);
-       vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+       vxlanp = vxlan_lookup_port(vxlan, port);
        if (!vxlanp) {
                ret = -ENOENT;
                goto out_unlock;
        }

        if (refcount_dec_and_test(&vxlanp->refcount)) {
-               hash_del(&vxlanp->hlist);
-               remove = true;
-       }
-
-out_unlock:
-       spin_unlock_bh(&vxlan->lock);
-
-       if (remove) {
+               hash_del_rcu(&vxlanp->hlist);
+               synchronize_rcu();
                mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
                kfree(vxlanp);
                vxlan->num_ports--;
        }

+out_unlock:
        mutex_unlock(&vxlan->sync_lock);
        return ret;
 }
@@ -201,7 +192,6 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
        vxlan->mdev = mdev;
        mutex_init(&vxlan->sync_lock);
-       spin_lock_init(&vxlan->lock);
        hash_init(vxlan->htable);

        /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */
......
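Taken together, the vxlan.c hunks implement item 2 of the cover letter: readers now walk the port table locklessly under rcu_read_lock() with hash_for_each_possible_rcu(), writers stay serialized by the existing sync_lock mutex, and an entry is only kfree()d after hash_del_rcu() plus synchronize_rcu() guarantee that no reader can still see it. A condensed, self-contained sketch of that pattern; the structure and function names here are illustrative, not the driver's:

#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct port_entry {
        struct hlist_node hlist;
        u16 udp_port;
};

static DEFINE_HASHTABLE(port_table, 4); /* 2^4 = 16 buckets */
static DEFINE_MUTEX(port_sync_lock);    /* serializes writers only */

/* Reader side: lockless lookup under RCU. */
static bool port_table_contains(u16 port)
{
        struct port_entry *e;
        bool found = false;

        rcu_read_lock();
        hash_for_each_possible_rcu(port_table, e, hlist, port)
                if (e->udp_port == port) {
                        found = true;
                        break;
                }
        rcu_read_unlock();
        return found;
}

/* Writer side: allocate, then publish with hash_add_rcu() under the mutex. */
static int port_table_add(u16 port)
{
        struct port_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
                return -ENOMEM;
        e->udp_port = port;

        mutex_lock(&port_sync_lock);
        hash_add_rcu(port_table, &e->hlist, port);
        mutex_unlock(&port_sync_lock);
        return 0;
}

/* Writer side: unlink, wait for in-flight readers, then free. */
static void port_table_del(struct port_entry *e)
{
        mutex_lock(&port_sync_lock);
        hash_del_rcu(&e->hlist);
        mutex_unlock(&port_sync_lock);

        synchronize_rcu();      /* no reader can still hold a reference to e */
        kfree(e);
}

One caveat visible in the hunk above: the new bool mlx5_vxlan_lookup_port() still returns NULL on the !mlx5_vxlan_allowed() path, which converts to false and works, but reads oddly for a bool return type.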
@@ -50,15 +50,14 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
 void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
-struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
 #else
 static inline struct mlx5_vxlan*
 mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
 static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
 static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
 static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
-static inline struct mx5_vxlan_port*
-mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; }
+static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
 #endif

 #endif /* __MLX5_VXLAN_H__ */