Commit 81e696b5 authored by David S. Miller

Merge branch 'nfp-flower-tc-block-support-and-nfp-PCI-updates'

Jakub Kicinski says:

====================
nfp: flower tc block support and nfp PCI updates

This series improves the nfp PCIe code by making use of the new
pcie_print_link_status() helper and by resetting NFP locks when the
driver loads.  This helps avoid lockups after the host crashes and is
rebooted with a PCIe reset, or when a kdump kernel is loaded.
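
The lock reset relies on the NFP mutex word format visible in the diff
below (nfp_mutex_owner()/nfp_mutex_is_locked()). A minimal sketch of that
layout and of the reclaim condition; mutex_reclaimable() is an
illustrative helper, not part of the patch:

/* NFP mutex word layout (from the helpers added below): bits 31:16
 * hold the owning PCIe interface ID; bits 15:0 read 0x000f while the
 * mutex is locked and 0x0000 once it has been released.
 */
static u32 mutex_owner(u32 val)
{
	return val >> 16;
}

static bool mutex_is_locked(u32 val)
{
	return (val & 0xffff) == 0x000f;
}

/* Illustrative: a stale lock is reclaimable exactly when the word says
 * "locked" and the recorded owner is our own local interface, i.e.
 * state left behind by a previous, crashed instance of this host.
 */
static bool mutex_reclaimable(u32 val, u16 local_interface)
{
	return mutex_is_locked(val) && mutex_owner(val) == local_interface;
}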

The flower changes come from John, who says:

This patchset fixes offload issues seen when multiple repr netdevs are
bound to a tc block and filter rules are added. Previously the rule would
be passed to all the reprs and rejected by all but the first, because the
cookie value would indicate a duplicate. The first patch extends the flow
lookup function to consider the host context and the ingress netdev along
with the cookie value. This means that a rule with a given cookie can
exist multiple times, provided the ingress netdev is different each time.
The host context ensures that stats from fw are associated with the
correct instance of the rule.
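
Condensed from the nfp_flower_search_fl_table() hunk in the diff below,
the extended lookup reduces to the following match predicate
(flow_entry_matches() is an illustrative name; the fields and the two
wildcard values are taken from the patch):

static bool flow_entry_matches(const struct nfp_fl_payload *entry,
			       unsigned long tc_flower_cookie,
			       const struct net_device *netdev,
			       __be32 host_ctx)
{
	return entry->tc_flower_cookie == tc_flower_cookie &&
	       /* NULL wildcards the ingress netdev (egress callbacks) */
	       (!netdev || entry->ingress_dev == netdev) &&
	       /* NFP_FL_STATS_CTX_DONT_CARE wildcards the host context */
	       (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
		entry->meta.host_ctx_id == host_ctx);
}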

The second patch protects against rejecting add/del/stat messages when a
rule has a repr as both its ingress port and its egress dev. In such
cases a callback can be triggered twice (once for ingress and once for
egress), which could lead to duplicate rule detection or incorrect double
calls.
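
The shape of that protection, condensed from the nfp_flower_add_offload()
hunk in the diff below: the egress callback looks the flow up with a NULL
ingress netdev, and a hit that was installed by the ingress callback is
treated as a benign duplicate rather than an error:

ingr_dev = egress ? NULL : netdev;
flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
				      NFP_FL_STATS_CTX_DONT_CARE);
if (flow_pay) {
	/* Same rule seen from the other callback: success for the
	 * egress pass, -EOPNOTSUPP for a genuine duplicate.
	 */
	if (flow_pay->ingress_offload && egress)
		return 0;
	return -EOPNOTSUPP;
}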
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c77bbc64 c50647d3
@@ -47,6 +47,7 @@
struct net_device;
struct nfp_app;
#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ENTRY_RS BIT(20)
#define NFP_FL_STATS_ELEM_RS 4
#define NFP_FL_REPEATED_HASH_MAX BIT(17)
@@ -189,9 +190,11 @@ struct nfp_fl_payload {
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr;
struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
char *action_data;
bool ingress_offload;
};
struct nfp_fl_stats_frame {
@@ -216,12 +219,14 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow);
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow);
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
......
@@ -99,14 +99,18 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flower_entry;
hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
tc_flower_cookie)
if (flower_entry->tc_flower_cookie == tc_flower_cookie)
if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
(!netdev || flower_entry->ingress_dev == netdev) &&
(host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
flower_entry->meta.host_ctx_id == host_ctx))
return flower_entry;
return NULL;
@@ -121,13 +125,11 @@ nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
flower_cookie = be64_to_cpu(stats->stats_cookie);
rcu_read_lock();
nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
stats->stats_con_id);
if (!nfp_flow)
goto exit_rcu_unlock;
if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
goto exit_rcu_unlock;
spin_lock(&nfp_flow->lock);
nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
@@ -317,7 +319,8 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow)
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
@@ -348,7 +351,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->stats.bytes = 0;
nfp_flow->stats.used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie);
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
NFP_FL_STATS_CTX_DONT_CARE);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
......
@@ -345,7 +345,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
struct nfp_fl_payload *flow_pay;
@@ -371,6 +371,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
flow_pay->ingress_offload = !egress;
return flow_pay;
err_free_mask:
@@ -402,8 +404,20 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
struct net_device *ingr_dev;
int err;
ingr_dev = egress ? NULL : netdev;
flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
NFP_FL_STATS_CTX_DONT_CARE);
if (flow_pay) {
/* Ignore as duplicate if it has been added by different cb. */
if (flow_pay->ingress_offload && egress)
return 0;
else
return -EOPNOTSUPP;
}
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
@@ -413,12 +427,14 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_key_ls;
flow_pay = nfp_flower_allocate_new(key_layer);
flow_pay = nfp_flower_allocate_new(key_layer, egress);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
flow_pay->ingress_dev = egress ? NULL : netdev;
err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
tun_type);
if (err)
@@ -428,7 +444,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
err = nfp_compile_flow_metadata(app, flow, flow_pay);
err = nfp_compile_flow_metadata(app, flow, flow_pay,
flow_pay->ingress_dev);
if (err)
goto err_destroy_flow;
@@ -462,6 +479,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
* @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -470,15 +488,18 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow)
struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
int err;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
ingr_dev = egress ? NULL : netdev;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
return -ENOENT;
return egress ? 0 : -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -505,7 +526,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
* @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
* @egress: Netdev is the egress dev.
*
* Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -513,14 +536,21 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
* Return: negative value on error, 0 if stats populated successfully.
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
ingr_dev = egress ? NULL : netdev;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
return -EINVAL;
if (nfp_flow->ingress_offload && egress)
return 0;
spin_lock_bh(&nfp_flow->lock);
tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
nfp_flow->stats.pkts, nfp_flow->stats.used);
@@ -543,9 +573,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
case TC_CLSFLOWER_REPLACE:
return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
return nfp_flower_get_stats(app, flower);
return nfp_flower_get_stats(app, netdev, flower, egress);
}
return -EOPNOTSUPP;
......
@@ -486,6 +486,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_disable_msix;
}
err = nfp_resource_table_init(pf->cpp);
if (err)
goto err_cpp_free;
pf->hwinfo = nfp_hwinfo_read(pf->cpp);
dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
@@ -548,6 +552,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
vfree(pf->dumpspec);
err_hwinfo_free:
kfree(pf->hwinfo);
err_cpp_free:
nfp_cpp_free(pf->cpp);
err_disable_msix:
destroy_workqueue(pf->wq);
......
@@ -94,6 +94,8 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
/* MAC Statistics Accumulator */
#define NFP_RESOURCE_MAC_STATISTICS "mac.stat"
int nfp_resource_table_init(struct nfp_cpp *cpp);
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);
......
@@ -1330,6 +1330,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
/* Finished with card initialization. */
dev_info(&pdev->dev,
"Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");
pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp) {
......
@@ -295,6 +295,8 @@ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
unsigned long long address);
/**
* nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle
......
@@ -59,6 +59,11 @@ static u32 nfp_mutex_unlocked(u16 interface)
return (u32)interface << 16 | 0x0000;
}
static u32 nfp_mutex_owner(u32 val)
{
return val >> 16;
}
static bool nfp_mutex_is_locked(u32 val)
{
return (val & 0xffff) == 0x000f;
@@ -351,3 +356,43 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
/**
* nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint
* @cpp: NFP CPP handle
* @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
* @address: Offset into the address space of the NFP CPP target ID
*
* Release lock if held by local system. Extreme care is advised, call only
* when no local lock users can exist.
*
* Return: 0 if the lock was OK, 1 if locked by us, -errno on invalid mutex
*/
int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
unsigned long long address)
{
const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
u16 interface = nfp_cpp_interface(cpp);
int err;
u32 tmp;
err = nfp_cpp_mutex_validate(interface, &target, address);
if (err)
return err;
/* Check lock */
err = nfp_cpp_readl(cpp, mur, address, &tmp);
if (err < 0)
return err;
if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
return 0;
/* Bust the lock */
err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface));
if (err < 0)
return err;
return 1;
}
@@ -338,3 +338,62 @@ u64 nfp_resource_size(struct nfp_resource *res)
{
return res->size;
}
/**
* nfp_resource_table_init() - Run initial checks on the resource table
* @cpp: NFP CPP handle
*
* Start-of-day init procedure for resource table. Must be called before
* any local resource table users may exist.
*
* Return: 0 on success, -errno on failure
*/
int nfp_resource_table_init(struct nfp_cpp *cpp)
{
struct nfp_cpp_mutex *dev_mutex;
int i, err;
err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET,
NFP_RESOURCE_TBL_BASE);
if (err < 0) {
nfp_err(cpp, "Error: failed to reclaim resource table mutex\n");
return err;
}
if (err)
nfp_warn(cpp, "Warning: busted main resource table mutex\n");
dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
NFP_RESOURCE_TBL_BASE,
NFP_RESOURCE_TBL_KEY);
if (!dev_mutex)
return -ENOMEM;
if (nfp_cpp_mutex_lock(dev_mutex)) {
nfp_err(cpp, "Error: failed to claim resource table mutex\n");
nfp_cpp_mutex_free(dev_mutex);
return -EINVAL;
}
/* Resource 0 is the dev_mutex, start from 1 */
for (i = 1; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
u64 addr = NFP_RESOURCE_TBL_BASE +
sizeof(struct nfp_resource_entry) * i;
err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, addr);
if (err < 0) {
nfp_err(cpp,
"Error: failed to reclaim resource %d mutex\n",
i);
goto err_unlock;
}
if (err)
nfp_warn(cpp, "Warning: busted resource %d mutex\n", i);
}
err = 0;
err_unlock:
nfp_cpp_mutex_unlock(dev_mutex);
nfp_cpp_mutex_free(dev_mutex);
return err;
}