Commit 244fd698 authored by Maor Dickman, committed by Jakub Kicinski

net/mlx5e: TC, Extract indr setup block checks to function

In preparation for the next patch, which will add a new check for
whether a device block can be set up, extract all the existing checks
into a function to make the code more readable and maintainable.
Signed-off-by: Maor Dickman <maord@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20230314054234.267365-14-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8a0594c0
...
@@ -426,39 +426,53 @@ static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
 	return macvlan->mode == MACVLAN_MODE_PASSTHRU;
 }
 
-static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
-			   struct mlx5e_rep_priv *rpriv,
-			   struct flow_block_offload *f,
-			   flow_setup_cb_t *setup_cb,
-			   void *data,
-			   void (*cleanup)(struct flow_block_cb *block_cb))
+static bool
+mlx5e_rep_check_indr_block_supported(struct mlx5e_rep_priv *rpriv,
+				     struct net_device *netdev,
+				     struct flow_block_offload *f)
 {
 	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	bool is_ovs_int_port = netif_is_ovs_master(netdev);
-	struct mlx5e_rep_indr_block_priv *indr_priv;
-	struct flow_block_cb *block_cb;
 
-	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
-	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
-	    !is_ovs_int_port) {
-		if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
-			return -EOPNOTSUPP;
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+		return false;
+
+	if (mlx5e_tc_tun_device_to_offload(priv, netdev))
+		return true;
+
+	if (is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)
+		return true;
+
+	if (netif_is_macvlan(netdev)) {
 		if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
 			netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
-			return -EOPNOTSUPP;
+			return false;
 		}
+
+		if (macvlan_dev_real_dev(netdev) == rpriv->netdev)
+			return true;
 	}
 
-	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
-	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
-		return -EOPNOTSUPP;
+	if (netif_is_ovs_master(netdev) && f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+	    mlx5e_tc_int_port_supported(esw))
+		return true;
 
-	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
-		return -EOPNOTSUPP;
+	return false;
+}
 
-	if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
+static int
+mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
+			   struct mlx5e_rep_priv *rpriv,
+			   struct flow_block_offload *f,
+			   flow_setup_cb_t *setup_cb,
+			   void *data,
+			   void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	struct mlx5e_rep_indr_block_priv *indr_priv;
+	struct flow_block_cb *block_cb;
+
+	if (!mlx5e_rep_check_indr_block_supported(rpriv, netdev, f))
 		return -EOPNOTSUPP;
 
 	f->unlocked_driver_cb = true;
...
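The hunk moves the device and binder-type checks verbatim out of mlx5e_rep_indr_setup_block() into the new bool-returning helper, leaving the caller with a single supported/unsupported gate. As a rough illustration of the same pattern outside the driver, here is a minimal standalone C sketch with hypothetical names (block_supported(), setup_block(), BINDER_INGRESS/BINDER_EGRESS are invented for this example and are not part of mlx5e):

	#include <stdbool.h>
	#include <stdio.h>
	#include <errno.h>

	/* Hypothetical stand-ins for the flow block binder types. */
	enum binder_type { BINDER_INGRESS, BINDER_EGRESS, BINDER_OTHER };

	struct block_offload {
		enum binder_type binder_type;
	};

	/* Extracted helper: answers only "can this block be set up?". */
	static bool block_supported(const struct block_offload *f, bool dev_offloadable)
	{
		if (f->binder_type != BINDER_INGRESS && f->binder_type != BINDER_EGRESS)
			return false;

		return dev_offloadable;
	}

	/* Caller keeps the setup logic behind a single gate on the helper. */
	static int setup_block(const struct block_offload *f, bool dev_offloadable)
	{
		if (!block_supported(f, dev_offloadable))
			return -EOPNOTSUPP;

		/* ... register the flow block callback here ... */
		return 0;
	}

	int main(void)
	{
		struct block_offload f = { .binder_type = BINDER_OTHER };

		printf("setup_block() = %d\n", setup_block(&f, true));
		return 0;
	}

Keeping the helper's result boolean, as the patch does, means later patches can add further conditions without touching the error-handling path in the caller.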