Commit 14690995 authored by Yanguo Li, committed by David S. Miller

nfp: flower: avoid rmmod nfp crash issues

When there are CT table entries, and you rmmod nfp, the following
events can happen:

task1:
    nfp_net_pci_remove
          ↓
    nfp_flower_stop->(asynchronous)tcf_ct_flow_table_cleanup_work(3)
          ↓
    nfp_zone_table_entry_destroy(1)

task2:
    nfp_fl_ct_handle_nft_flow(2)

When the execution order is (1)->(2)->(3), it will crash. Therefore, in
the function nfp_fl_ct_del_flow, nf_flow_table_offload_del_cb needs to
be executed synchronously.

At the same time, to resolve both the deadlock problem and the problem
of rtnl_lock occasionally failing to be acquired, replace rtnl_lock with
the driver-private nfp_fl_lock.

Fixes: 7cc93d88 ("nfp: flower-ct: remove callback delete deadlock")
Cc: stable@vger.kernel.org
Signed-off-by: Yanguo Li <yanguo.li@corigine.com>
Signed-off-by: Louis Peens <louis.peens@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8f8abb86
...@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) ...@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_flower_cmsg_merge_hint *msg; struct nfp_flower_cmsg_merge_hint *msg;
struct nfp_fl_payload *sub_flows[2]; struct nfp_fl_payload *sub_flows[2];
struct nfp_flower_priv *priv;
int err, i, flow_cnt; int err, i, flow_cnt;
msg = nfp_flower_cmsg_get_data(skb); msg = nfp_flower_cmsg_get_data(skb);
...@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) ...@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
return; return;
} }
rtnl_lock(); priv = app->priv;
mutex_lock(&priv->nfp_fl_lock);
for (i = 0; i < flow_cnt; i++) { for (i = 0; i < flow_cnt; i++) {
u32 ctx = be32_to_cpu(msg->flow[i].host_ctx); u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx); sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
if (!sub_flows[i]) { if (!sub_flows[i]) {
nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n"); nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
goto err_rtnl_unlock; goto err_mutex_unlock;
} }
} }
...@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) ...@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
if (err == -ENOMEM) if (err == -ENOMEM)
nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n"); nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
err_rtnl_unlock: err_mutex_unlock:
rtnl_unlock(); mutex_unlock(&priv->nfp_fl_lock);
} }
static void static void
......
...@@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl ...@@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
struct nfp_fl_ct_flow_entry *ct_entry; struct nfp_fl_ct_flow_entry *ct_entry;
struct netlink_ext_ack *extack = NULL; struct netlink_ext_ack *extack = NULL;
ASSERT_RTNL();
extack = flow->common.extack; extack = flow->common.extack;
switch (flow->command) { switch (flow->command) {
case FLOW_CLS_REPLACE: case FLOW_CLS_REPLACE:
...@@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb ...@@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb
switch (type) { switch (type) {
case TC_SETUP_CLSFLOWER: case TC_SETUP_CLSFLOWER:
rtnl_lock(); while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
if (!zt->nft) /* avoid deadlock */
return err;
msleep(20);
}
err = nfp_fl_ct_offload_nft_flow(zt, flow); err = nfp_fl_ct_offload_nft_flow(zt, flow);
rtnl_unlock(); mutex_unlock(&zt->priv->nfp_fl_lock);
break; break;
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) ...@@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
struct nfp_fl_ct_flow_entry *ct_entry; struct nfp_fl_ct_flow_entry *ct_entry;
struct nfp_fl_ct_zone_entry *zt; struct nfp_fl_ct_zone_entry *zt;
struct rhashtable *m_table; struct rhashtable *m_table;
struct nf_flowtable *nft;
if (!ct_map_ent) if (!ct_map_ent)
return -ENOENT; return -ENOENT;
...@@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) ...@@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
if (ct_map_ent->cookie > 0) if (ct_map_ent->cookie > 0)
kfree(ct_map_ent); kfree(ct_map_ent);
if (!zt->pre_ct_count) { if (!zt->pre_ct_count && zt->nft) {
zt->nft = NULL; nft = zt->nft;
zt->nft = NULL; /* avoid deadlock */
nf_flow_table_offload_del_cb(nft,
nfp_fl_ct_handle_nft_flow,
zt);
nfp_fl_ct_clean_nft_entries(zt); nfp_fl_ct_clean_nft_entries(zt);
} }
break; break;
......
...@@ -297,6 +297,7 @@ struct nfp_fl_internal_ports { ...@@ -297,6 +297,7 @@ struct nfp_fl_internal_ports {
* @predt_list: List to keep track of decap pretun flows * @predt_list: List to keep track of decap pretun flows
* @neigh_table: Table to keep track of neighbor entries * @neigh_table: Table to keep track of neighbor entries
* @predt_lock: Lock to serialise predt/neigh table updates * @predt_lock: Lock to serialise predt/neigh table updates
* @nfp_fl_lock: Lock to protect the flow offload operation
*/ */
struct nfp_flower_priv { struct nfp_flower_priv {
struct nfp_app *app; struct nfp_app *app;
...@@ -339,6 +340,7 @@ struct nfp_flower_priv { ...@@ -339,6 +340,7 @@ struct nfp_flower_priv {
struct list_head predt_list; struct list_head predt_list;
struct rhashtable neigh_table; struct rhashtable neigh_table;
spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */ spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
struct mutex nfp_fl_lock; /* Protect the flow operation */
}; };
/** /**
......
...@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, ...@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err) if (err)
goto err_free_stats_ctx_table; goto err_free_stats_ctx_table;
mutex_init(&priv->nfp_fl_lock);
err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params); err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
if (err) if (err)
goto err_free_merge_table; goto err_free_merge_table;
......
...@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, ...@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
u64 parent_ctx = 0; u64 parent_ctx = 0;
int err; int err;
ASSERT_RTNL();
if (sub_flow1 == sub_flow2 || if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) || nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2)) nfp_flower_is_merge_flow(sub_flow2))
...@@ -1727,19 +1725,30 @@ static int ...@@ -1727,19 +1725,30 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flower) struct flow_cls_offload *flower)
{ {
struct nfp_flower_priv *priv = app->priv;
int ret;
if (!eth_proto_is_802_3(flower->common.protocol)) if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP; return -EOPNOTSUPP;
mutex_lock(&priv->nfp_fl_lock);
switch (flower->command) { switch (flower->command) {
case FLOW_CLS_REPLACE: case FLOW_CLS_REPLACE:
return nfp_flower_add_offload(app, netdev, flower); ret = nfp_flower_add_offload(app, netdev, flower);
break;
case FLOW_CLS_DESTROY: case FLOW_CLS_DESTROY:
return nfp_flower_del_offload(app, netdev, flower); ret = nfp_flower_del_offload(app, netdev, flower);
break;
case FLOW_CLS_STATS: case FLOW_CLS_STATS:
return nfp_flower_get_stats(app, netdev, flower); ret = nfp_flower_get_stats(app, netdev, flower);
break;
default: default:
return -EOPNOTSUPP; ret = -EOPNOTSUPP;
break;
} }
mutex_unlock(&priv->nfp_fl_lock);
return ret;
} }
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
...@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, ...@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
repr_priv = repr->app_priv; repr_priv = repr->app_priv;
repr_priv->block_shared = f->block_shared; repr_priv->block_shared = f->block_shared;
f->driver_block_list = &nfp_block_cb_list; f->driver_block_list = &nfp_block_cb_list;
f->unlocked_driver_cb = true;
switch (f->command) { switch (f->command) {
case FLOW_BLOCK_BIND: case FLOW_BLOCK_BIND:
...@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str ...@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
nfp_flower_internal_port_can_offload(app, netdev))) nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
f->unlocked_driver_cb = true;
switch (f->command) { switch (f->command) {
case FLOW_BLOCK_BIND: case FLOW_BLOCK_BIND:
cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
......
...@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, ...@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
{ {
struct netlink_ext_ack *extack = flow->common.extack; struct netlink_ext_ack *extack = flow->common.extack;
struct nfp_flower_priv *fl_priv = app->priv; struct nfp_flower_priv *fl_priv = app->priv;
int ret;
if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) { if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload"); NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
mutex_lock(&fl_priv->nfp_fl_lock);
switch (flow->command) { switch (flow->command) {
case TC_CLSMATCHALL_REPLACE: case TC_CLSMATCHALL_REPLACE:
return nfp_flower_install_rate_limiter(app, netdev, flow, ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
extack); break;
case TC_CLSMATCHALL_DESTROY: case TC_CLSMATCHALL_DESTROY:
return nfp_flower_remove_rate_limiter(app, netdev, flow, ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
extack); break;
case TC_CLSMATCHALL_STATS: case TC_CLSMATCHALL_STATS:
return nfp_flower_stats_rate_limiter(app, netdev, flow, ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
extack); break;
default: default:
return -EOPNOTSUPP; ret = -EOPNOTSUPP;
break;
} }
mutex_unlock(&fl_priv->nfp_fl_lock);
return ret;
} }
/* Offload tc action, currently only for tc police */ /* Offload tc action, currently only for tc police */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment