Commit 07def463 authored by David S. Miller

Merge branch 'cxgb4-add-TC-MATCHALL-classifier-offload'

Rahul Lakkireddy says:

====================
cxgb4: add TC-MATCHALL classifier offload

This series of patches adds support for offloading the TC-MATCHALL
classifier to hardware, to classify all outgoing and incoming traffic
on the underlying port. Only one egress rule and one ingress rule can
be offloaded on the underlying port.

Patch 1 adds support for TC-MATCHALL classifier offload on the egress
side. TC-POLICE is the only action that can be offloaded on the
egress side; it is used to rate limit all outgoing traffic to the
specified maximum rate.
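
As an illustration, such a rule would typically be installed with
something like "tc filter add dev ethX egress matchall skip_sw action
police rate 1Gbit burst 8k". TC-POLICE hands the driver a rate in
bytes per second, while the cxgb4 scheduler is programmed in Kbps
(see the sched.c hunks below); a minimal sketch of that conversion,
with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: TC-POLICE supplies rate_bytes_ps (bytes/sec),
 * but the cxgb4 scheduler expects a max rate in kilobits/sec.
 */
static uint64_t police_rate_to_kbps(uint64_t rate_bytes_ps)
{
	return rate_bytes_ps * 8 / 1000;
}

int main(void)
{
	/* 1 Gbit/s == 125000000 bytes/s == 1000000 Kbps */
	printf("%llu Kbps\n",
	       (unsigned long long)police_rate_to_kbps(125000000ULL));
	return 0;
}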

Patch 2 adds logic to reject the current rule offload if its priority
conflicts with existing rules in the TCAM.

Patch 3 adds support for TC-MATCHALL classifier offload on the
ingress side. The same set of actions supported by the existing
TC-FLOWER classifier offload can be applied to all incoming traffic.

v5:
- Fixed commit message and comment to include comparison for equal
  priority in patch 2.

v4:
- Removed check in patch 1 to reject police offload if prio is not 1.
- Moved TC_SETUP_BLOCK code to separate function in patch 1.
- Added logic to ensure the prio passed by TC doesn't conflict with
  other rules in TCAM in patch 2.
- A higher index has lower priority than a lower index in the TCAM.
  So, rework cxgb4_get_free_ftid() to search for a free index from
  the end of the TCAM in descending order in patch 2.
- Added check to ensure the matchall rule's prio doesn't conflict with
  other rules in TCAM in patch 3.
- Added logic to fill default mask for VIID, if none has been
  provided, to prevent conflict with duplicate VIID rules in patch 3.
- Used existing variables in private structure to fill VIID info,
  instead of extracting the info manually in patch 3.

v3:
- Added check in patch 1 to reject police offload if prio is not 1.
- Assign block_shared variable only for TC_SETUP_BLOCK in patch 1.

v2:
- Added check to reject flow block sharing for policers in patch 1.
- Removed logic to fetch free index from end of TCAM in patch 2.
  Must maintain the same ordering as in the kernel.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 77c05d2f 21c4c60b
@@ -8,7 +8,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o
cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o \
cxgb4_tc_matchall.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
@@ -603,6 +603,8 @@ struct port_info {
u8 vivld;
u8 smt_idx;
u8 rx_cchan;
bool tc_block_shared;
};
struct dentry;
@@ -1101,6 +1103,9 @@ struct adapter {
/* TC MQPRIO offload */
struct cxgb4_tc_mqprio *tc_mqprio;
/* TC MATCHALL classifier offload */
struct cxgb4_tc_matchall *tc_matchall;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1130,6 +1135,7 @@ enum {
enum {
SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */
SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */
};
enum {
@@ -1280,8 +1286,11 @@ struct ch_filter_specification {
u16 nat_lport; /* local port to use after NAT'ing */
u16 nat_fport; /* foreign port to use after NAT'ing */
u32 tc_prio; /* TC's filter priority index */
u64 tc_cookie; /* Unique cookie identifying TC rules */
/* reservation for future additions */
u8 rsvd[24];
u8 rsvd[12];
/* Filter rule value/mask pairs.
*/
@@ -440,36 +440,48 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
struct adapter *adap = netdev2adap(dev);
struct tid_info *t = &adap->tids;
bool found = false;
u8 i, n, cnt;
int ftid;
spin_lock_bh(&t->ftid_lock);
if (family == PF_INET) {
ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
if (ftid >= t->nftids)
ftid = -1;
} else {
if (is_t6(adap->params.chip)) {
ftid = bitmap_find_free_region(t->ftid_bmap,
t->nftids, 1);
if (ftid < 0)
goto out_unlock;
/* this is only a lookup, keep the found region
* unallocated
*/
bitmap_release_region(t->ftid_bmap, ftid, 1);
} else {
ftid = bitmap_find_free_region(t->ftid_bmap,
t->nftids, 2);
if (ftid < 0)
goto out_unlock;
/* IPv4 occupy 1 slot. IPv6 occupy 2 slots on T6 and 4 slots
* on T5.
*/
n = 1;
if (family == PF_INET6) {
n++;
if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
n += 2;
}
if (n > t->nftids)
return -ENOMEM;
bitmap_release_region(t->ftid_bmap, ftid, 2);
/* Find free filter slots from the end of TCAM. Appropriate
* checks must be done by caller later to ensure the prio
* passed by TC doesn't conflict with prio saved by existing
* rules in the TCAM.
*/
spin_lock_bh(&t->ftid_lock);
ftid = t->nftids - 1;
while (ftid >= n - 1) {
cnt = 0;
for (i = 0; i < n; i++) {
if (test_bit(ftid - i, t->ftid_bmap))
break;
cnt++;
}
if (cnt == n) {
ftid &= ~(n - 1);
found = true;
break;
}
ftid -= n;
}
out_unlock:
spin_unlock_bh(&t->ftid_lock);
return ftid;
return found ? ftid : -ENOMEM;
}
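
To make the new search concrete, here is a standalone userspace
sketch of the same descending scan (bitmap contents are hypothetical
and kernel helpers are replaced with a plain array). Per the comment
above, n is 1 for IPv4 and 2 (T6) or 4 (T5) for IPv6:

#include <stdbool.h>
#include <stdio.h>

#define NFTIDS 16

/* Stand-in for t->ftid_bmap; true means the slot is in use. */
static bool ftid_bmap[NFTIDS] = { [12] = true };

/* Mirrors the loop in cxgb4_get_free_ftid(): scan from the top of
 * the TCAM for n consecutive free slots, then align down to an
 * n-slot boundary (n is 1, 2, or 4, so n - 1 is a valid mask).
 */
static int find_free_ftid(int n)
{
	int ftid = NFTIDS - 1;

	while (ftid >= n - 1) {
		int i, cnt = 0;

		for (i = 0; i < n; i++) {
			if (ftid_bmap[ftid - i])
				break;
			cnt++;
		}
		if (cnt == n)
			return ftid & ~(n - 1);
		ftid -= n;
	}
	return -1;
}

int main(void)
{
	/* Slot 12 is busy, so 15..12 fails and 11..8 is chosen:
	 * prints 8, the highest free 4-aligned index.
	 */
	printf("%d\n", find_free_ftid(4));
	return 0;
}
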
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
@@ -510,6 +522,60 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
spin_unlock_bh(&t->ftid_lock);
}
bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
{
struct adapter *adap = netdev2adap(dev);
struct filter_entry *prev_fe, *next_fe;
struct tid_info *t = &adap->tids;
u32 prev_ftid, next_ftid;
bool valid = true;
/* Only insert the rule if both of the following conditions
* are met:
* 1. The immediate previous rule has priority <= @prio.
* 2. The immediate next rule has priority >= @prio.
*/
spin_lock_bh(&t->ftid_lock);
/* Don't insert if there's a rule already present at @idx. */
if (test_bit(idx, t->ftid_bmap)) {
valid = false;
goto out_unlock;
}
next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
if (next_ftid >= t->nftids)
next_ftid = idx;
next_fe = &adap->tids.ftid_tab[next_ftid];
prev_ftid = find_last_bit(t->ftid_bmap, idx);
if (prev_ftid >= idx)
prev_ftid = idx;
/* See if the filter entry belongs to an IPv6 rule, which
* occupy 4 slots on T5 and 2 slots on T6. Adjust the
* reference to the previously inserted filter entry
* accordingly.
*/
if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3];
if (!prev_fe->fs.type)
prev_fe = &adap->tids.ftid_tab[prev_ftid];
} else {
prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1];
if (!prev_fe->fs.type)
prev_fe = &adap->tids.ftid_tab[prev_ftid];
}
if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
(next_fe->valid && prio > next_fe->fs.tc_prio))
valid = false;
out_unlock:
spin_unlock_bh(&t->ftid_lock);
return valid;
}
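
The check above enforces that tc_prio is non-decreasing with LETCAM
index. A self-contained sketch of the same invariant (table contents
are hypothetical; the IPv6 slot-alignment handling is omitted):

#include <stdbool.h>
#include <stdio.h>

#define NRULES 16

struct rule {
	bool valid;
	unsigned int prio;
};

/* Hypothetical snapshot: index 4 holds prio 2, index 8 holds prio 5. */
static struct rule tab[NRULES] = {
	[4] = { .valid = true, .prio = 2 },
	[8] = { .valid = true, .prio = 5 },
};

/* Same ordering rule as cxgb4_filter_prio_in_range(): the nearest
 * valid neighbour below @idx must not have a higher prio, and the
 * nearest valid neighbour above must not have a lower one.
 */
static bool prio_in_range(int idx, unsigned int prio)
{
	int i;

	if (tab[idx].valid)	/* slot already taken */
		return false;

	for (i = idx - 1; i >= 0; i--) {
		if (tab[i].valid) {
			if (prio < tab[i].prio)
				return false;
			break;
		}
	}

	for (i = idx + 1; i < NRULES; i++) {
		if (tab[i].valid) {
			if (prio > tab[i].prio)
				return false;
			break;
		}
	}

	return true;
}

int main(void)
{
	printf("%d\n", prio_in_range(6, 3));	/* 1: 2 <= 3 <= 5 */
	printf("%d\n", prio_in_range(6, 7));	/* 0: 7 > 5 at index 8 */
	return 0;
}
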
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
@@ -806,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs)
fs->mask.tos |= ~0;
if (fs->val.proto && !fs->mask.proto)
fs->mask.proto |= ~0;
if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
fs->mask.pfvf_vld |= ~0;
if (fs->val.pf && !fs->mask.pf)
fs->mask.pf |= ~0;
if (fs->val.vf && !fs->mask.vf)
fs->mask.vf |= ~0;
for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
lip |= fs->val.lip[i];
@@ -53,4 +53,5 @@ void clear_all_filters(struct adapter *adapter);
void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio);
#endif /* __CXGB4_FILTER_H */
@@ -84,6 +84,7 @@
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
@@ -3234,8 +3235,33 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev,
}
}
static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
static int cxgb_setup_tc_matchall(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall,
bool ingress)
{
struct adapter *adap = netdev2adap(dev);
if (!adap->tc_matchall)
return -ENOMEM;
switch (cls_matchall->command) {
case TC_CLSMATCHALL_REPLACE:
return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
case TC_CLSMATCHALL_DESTROY:
return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
case TC_CLSMATCHALL_STATS:
if (ingress)
return cxgb4_tc_matchall_stats(dev, cls_matchall);
break;
default:
break;
}
return -EOPNOTSUPP;
}
static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct net_device *dev = cb_priv;
struct port_info *pi = netdev2pinfo(dev);
@@ -3256,11 +3282,40 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return cxgb_setup_tc_cls_u32(dev, type_data);
case TC_SETUP_CLSFLOWER:
return cxgb_setup_tc_flower(dev, type_data);
case TC_SETUP_CLSMATCHALL:
return cxgb_setup_tc_matchall(dev, type_data, true);
default:
return -EOPNOTSUPP;
}
}
static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct net_device *dev = cb_priv;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to setup tc on port %d. Link Down?\n",
pi->port_id);
return -EINVAL;
}
if (!tc_cls_can_offload_and_chain0(dev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSMATCHALL:
return cxgb_setup_tc_matchall(dev, type_data, false);
default:
break;
}
return -EOPNOTSUPP;
}
static int cxgb_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio)
{
@@ -3274,19 +3329,34 @@ static int cxgb_setup_tc_mqprio(struct net_device *dev,
static LIST_HEAD(cxgb_block_cb_list);
static int cxgb_setup_tc_block(struct net_device *dev,
struct flow_block_offload *f)
{
struct port_info *pi = netdev_priv(dev);
flow_setup_cb_t *cb;
bool ingress_only;
pi->tc_block_shared = f->block_shared;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = cxgb_setup_tc_block_egress_cb;
ingress_only = false;
} else {
cb = cxgb_setup_tc_block_ingress_cb;
ingress_only = true;
}
return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
cb, pi, dev, ingress_only);
}
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct port_info *pi = netdev2pinfo(dev);
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return cxgb_setup_tc_mqprio(dev, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&cxgb_block_cb_list,
cxgb_setup_tc_block_cb,
pi, dev, true);
return cxgb_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -5741,6 +5811,7 @@ static void free_some_resources(struct adapter *adapter)
kvfree(adapter->srq);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
cxgb4_cleanup_tc_matchall(adapter);
cxgb4_cleanup_tc_mqprio(adapter);
cxgb4_cleanup_tc_flower(adapter);
cxgb4_cleanup_tc_u32(adapter);
@@ -6315,6 +6386,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cxgb4_init_tc_mqprio(adapter))
dev_warn(&pdev->dev,
"could not offload tc mqprio, continuing\n");
if (cxgb4_init_tc_matchall(adapter))
dev_warn(&pdev->dev,
"could not offload tc matchall, continuing\n");
}
if (is_offload(adapter) || is_hashfilter(adapter)) {
@@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
}
static void cxgb4_process_flow_actions(struct net_device *in,
struct flow_cls_offload *cls,
struct ch_filter_specification *fs)
void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
int i;
flow_action_for_each(i, act, &rule->action) {
flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS;
@@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev,
return true;
}
static int cxgb4_validate_flow_actions(struct net_device *dev,
struct flow_cls_offload *cls)
int cxgb4_validate_flow_actions(struct net_device *dev,
struct flow_action *actions)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
int i;
flow_action_for_each(i, act, &rule->action) {
flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_DROP:
@@ -636,14 +634,15 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
struct ch_filter_specification *fs;
struct filter_ctx ctx;
int fidx;
int ret;
int fidx, ret;
if (cxgb4_validate_flow_actions(dev, cls))
if (cxgb4_validate_flow_actions(dev, &rule->action))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls))
@@ -658,20 +657,41 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
fs = &ch_flower->fs;
fs->hitcnts = 1;
cxgb4_process_flow_match(dev, cls, fs);
cxgb4_process_flow_actions(dev, cls, fs);
cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
if (fs->hash) {
fidx = 0;
} else {
fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
if (fidx < 0) {
netdev_err(dev, "%s: No fidx for offload.\n", __func__);
u8 inet_family;
inet_family = fs->type ? PF_INET6 : PF_INET;
/* Note that TC uses prio 0 to indicate stack to
* generate automatic prio and hence doesn't pass prio
* 0 to driver. However, the hardware TCAM index
* starts from 0. Hence, the -1 here.
*/
if (cls->common.prio <= adap->tids.nftids)
fidx = cls->common.prio - 1;
else
fidx = cxgb4_get_free_ftid(dev, inet_family);
/* Only insert FLOWER rule if its priority doesn't
* conflict with existing rules in the LETCAM.
*/
if (fidx < 0 ||
!cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
ret = -ENOMEM;
goto free_entry;
}
}
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
@@ -108,6 +108,12 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
int cxgb4_validate_flow_actions(struct net_device *dev,
struct flow_action *actions);
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
int cxgb4_tc_flower_destroy(struct net_device *dev,
(The diff for the new file cxgb4_tc_matchall.c is collapsed here.)
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
#ifndef __CXGB4_TC_MATCHALL_H__
#define __CXGB4_TC_MATCHALL_H__
#include <net/pkt_cls.h>
enum cxgb4_matchall_state {
CXGB4_MATCHALL_STATE_DISABLED = 0,
CXGB4_MATCHALL_STATE_ENABLED,
};
struct cxgb4_matchall_egress_entry {
enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
u8 hwtc; /* Traffic class bound to port */
u64 cookie; /* Used to identify the MATCHALL rule offloaded */
};
struct cxgb4_matchall_ingress_entry {
enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
u32 tid; /* Index to hardware filter entry */
struct ch_filter_specification fs; /* Filter entry */
u64 bytes; /* # of bytes hitting the filter */
u64 packets; /* # of packets hitting the filter */
u64 last_used; /* Last updated jiffies time */
};
struct cxgb4_tc_port_matchall {
struct cxgb4_matchall_egress_entry egress; /* Egress offload info */
struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */
};
struct cxgb4_tc_matchall {
struct cxgb4_tc_port_matchall *port_matchall; /* Per port entry */
};
int cxgb4_tc_matchall_replace(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall,
bool ingress);
int cxgb4_tc_matchall_destroy(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall,
bool ingress);
int cxgb4_tc_matchall_stats(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall);
int cxgb4_init_tc_matchall(struct adapter *adap);
void cxgb4_cleanup_tc_matchall(struct adapter *adap);
#endif /* __CXGB4_TC_MATCHALL_H__ */
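
The matching cxgb4_tc_matchall.c implementation is collapsed in this
diff. Based on the declarations above, its init path presumably just
allocates the per-port table; a hedged sketch (exact field names and
error handling are assumptions, not the actual file):

int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	/* One egress + one ingress entry per port, matching the
	 * per-port rule limit described in the cover letter.
	 */
	port_matchall = kcalloc(adap->params.nports,
				sizeof(*port_matchall), GFP_KERNEL);
	if (!port_matchall) {
		kfree(tc_matchall);
		return -ENOMEM;
	}

	tc_matchall->port_matchall = port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;
}
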
@@ -11,8 +11,7 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
u64 min_rate = 0, max_rate = 0, max_link_rate;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
u32 qcount = 0, qoffset = 0;
u32 link_ok, speed, mtu;
u32 speed, qcount = 0, qoffset = 0;
int ret;
u8 i;
@@ -35,7 +34,7 @@
return -ERANGE;
}
ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
ret = t4_get_link_params(pi, NULL, &speed, NULL);
if (ret) {
netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
return -EINVAL;
@@ -36,6 +36,7 @@
#include <net/tc_act/tc_mirred.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
@@ -148,6 +149,7 @@ static int fill_action_fields(struct adapter *adap,
int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
const struct cxgb4_match_field *start, *link_start = NULL;
struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adapter = netdev2adap(dev);
__be16 protocol = cls->common.protocol;
struct ch_filter_specification fs;
@@ -164,14 +166,21 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
return -EOPNOTSUPP;
/* Fetch the location to insert the filter. */
filter_id = cls->knode.handle & 0xFFFFF;
/* Note that TC uses prio 0 to indicate stack to generate
* automatic prio and hence doesn't pass prio 0 to driver.
* However, the hardware TCAM index starts from 0. Hence, the
* -1 here.
*/
filter_id = TC_U32_NODE(cls->knode.handle) - 1;
if (filter_id > adapter->tids.nftids) {
dev_err(adapter->pdev_dev,
"Location %d out of range for insertion. Max: %d\n",
filter_id, adapter->tids.nftids);
return -ERANGE;
/* Only insert U32 rule if its priority doesn't conflict with
* existing rules in the LETCAM.
*/
if (filter_id >= adapter->tids.nftids ||
!cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
return -ENOMEM;
}
t = adapter->tc_u32;
......@@ -190,6 +199,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
memset(&fs, 0, sizeof(fs));
fs.tc_prio = cls->common.prio;
fs.tc_cookie = cls->knode.handle;
if (protocol == htons(ETH_P_IPV6)) {
start = cxgb4_ipv6_fields;
is_ipv6 = true;
@@ -350,14 +362,10 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
filter_id = cls->knode.handle & 0xFFFFF;
if (filter_id > adapter->tids.nftids) {
dev_err(adapter->pdev_dev,
"Location %d out of range for deletion. Max: %d\n",
filter_id, adapter->tids.nftids);
filter_id = TC_U32_NODE(cls->knode.handle) - 1;
if (filter_id >= adapter->tids.nftids ||
cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie)
return -ERANGE;
}
t = adapter->tc_u32;
handle = cls->knode.handle;
@@ -50,6 +50,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
e = &s->tab[p->u.params.class];
switch (op) {
case SCHED_FW_OP_ADD:
case SCHED_FW_OP_DEL:
err = t4_sched_params(adap, p->type,
p->u.params.level, p->u.params.mode,
p->u.params.rateunit,
@@ -188,10 +189,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
e = &pi->sched_tbl->tab[qe->param.class];
list_del(&qe->list);
kvfree(qe);
if (atomic_dec_and_test(&e->refcnt)) {
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
if (atomic_dec_and_test(&e->refcnt))
cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
}
return err;
}
@@ -261,10 +260,8 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
e = &pi->sched_tbl->tab[fe->param.class];
list_del(&fe->list);
kvfree(fe);
if (atomic_dec_and_test(&e->refcnt)) {
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
if (atomic_dec_and_test(&e->refcnt))
cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
}
return err;
}
@@ -469,10 +466,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
struct sched_class *found = NULL;
struct sched_class *e, *end;
/* Only allow tc to be shared among SCHED_FLOWC types. For
* other types, always allocate a new tc.
*/
if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) {
if (!p) {
/* Get any available unused class */
end = &s->tab[s->sched_size];
for (e = &s->tab[0]; e != end; ++e) {
@@ -514,7 +508,7 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
struct sched_class *e;
struct sched_class *e = NULL;
u8 class_id;
int err;
@@ -529,10 +523,13 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
/* See if there's an existing class with same
* requested sched params
/* See if there's an existing class with same requested sched
* params. Classes can only be shared among FLOWC types. For
* other types, always request a new class.
*/
e = t4_sched_class_lookup(pi, p);
if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
e = t4_sched_class_lookup(pi, p);
if (!e) {
struct ch_sched_params np;
@@ -592,10 +589,35 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
struct ch_sched_params p;
struct sched_class *e;
u32 speed;
int ret;
e = &s->tab[classid];
if (!atomic_read(&e->refcnt)) {
if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
/* Port based rate limiting needs explicit reset back
* to max rate. But, we'll do explicit reset for all
* types, instead of just port based type, to be on
* the safer side.
*/
memcpy(&p, &e->info, sizeof(p));
/* Always reset mode to 0. Otherwise, FLOWC mode will
* still be enabled even after resetting the traffic
* class.
*/
p.u.params.mode = 0;
p.u.params.minrate = 0;
p.u.params.pktsize = 0;
ret = t4_get_link_params(pi, NULL, &speed, NULL);
if (!ret)
p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
else
p.u.params.maxrate = SCHED_MAX_RATE_KBPS;
t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
......
@@ -52,6 +52,7 @@ enum {
enum sched_fw_ops {
SCHED_FW_OP_ADD,
SCHED_FW_OP_DEL,
};
enum sched_bind_type {
@@ -8777,8 +8777,8 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
unsigned int *speedp, unsigned int *mtup)
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
struct fw_port_cmd port_cmd;
unsigned int action, link_ok, mtu;
struct fw_port_cmd port_cmd;
fw_port_cap32_t linkattr;
int ret;
@@ -8813,9 +8813,12 @@
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
*link_okp = link_ok;
*speedp = fwcap_to_speed(linkattr);
*mtup = mtu;
if (link_okp)
*link_okp = link_ok;
if (speedp)
*speedp = fwcap_to_speed(linkattr);
if (mtup)
*mtup = mtu;
return 0;
}