Commit 34802d06 authored by Paolo Abeni's avatar Paolo Abeni

Merge branch 'extend-action-skbedit-to-rx-queue-mapping'

Amritha Nambiar says:

====================
Extend action skbedit to RX queue mapping

Based on the discussion on
https://lore.kernel.org/netdev/166260012413.81018.8010396115034847972.stgit@anambiarhost.jf.intel.com/ ,
the following series extends skbedit tc action to RX queue mapping.
Currently, skbedit action in tc allows overriding of transmit queue.
Extending this ability of the skbedit action supports the selection of
receive queue for incoming packets. On the receive side, this action
is supported only in hardware, so the skip_sw flag is enforced.

Enabled ice driver to offload this type of filter into the hardware
for accepting packets to the device's receive queue.
====================

Link: https://lore.kernel.org/r/166633888716.52141.3425659377117969638.stgit@anambiarhost.jf.intel.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 6143eca3 d5ae8ecf
......@@ -104,6 +104,7 @@ Contents:
switchdev
sysfs-tagging
tc-actions-env-rules
tc-queue-filters
tcp-thin
team
timestamping
......
.. SPDX-License-Identifier: GPL-2.0
=========================
TC queue based filtering
=========================
TC can be used for directing traffic to either a set of queues or
to a single queue on both the transmit and receive side.
On the transmit side:
1) TC filter directing traffic to a set of queues is achieved
using the action skbedit priority for Tx priority selection,
the priority maps to a traffic class (set of queues) when
the queue-sets are configured using mqprio.
2) TC filter directs traffic to a transmit queue with the action
skbedit queue_mapping $tx_qid. The action skbedit queue_mapping
for transmit queue is executed in software only and cannot be
offloaded.
Likewise, on the receive side, the two filters for selecting set of
queues and/or a single queue are supported as below:
1) TC flower filter directs incoming traffic to a set of queues using
the 'hw_tc' option.
hw_tc $TCID - Specify a hardware traffic class to pass matching
packets on to. TCID is in the range 0 through 15.
2) TC filter with action skbedit queue_mapping $rx_qid selects a
receive queue. The action skbedit queue_mapping for receive queue
is supported only in hardware. Multiple filters may compete in
the hardware for queue selection. In such case, the hardware
pipeline resolves conflicts based on priority. On Intel E810
       devices, a TC filter directing traffic to a queue has higher
       priority than a flow director filter assigning a queue. The hash
       filter has the lowest priority.
......@@ -137,6 +137,21 @@
*/
#define ICE_BW_KBPS_DIVISOR 125
/* Default recipes have priority 4 and below, hence priority values between 5..7
* can be used as filter priority for advanced switch filter (advanced switch
* filters need new recipe to be created for specified extraction sequence
* because default recipe extraction sequence does not represent custom
* extraction)
*/
#define ICE_SWITCH_FLTR_PRIO_QUEUE 7
/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
* (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
* SYN/FIN/RST))
*/
#define ICE_SWITCH_FLTR_PRIO_RSVD 6
#define ICE_SWITCH_FLTR_PRIO_VSI 5
#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
......
......@@ -8283,7 +8283,7 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
rule.rid = fltr->rid;
rule.rule_id = fltr->rule_id;
rule.vsi_handle = fltr->dest_id;
rule.vsi_handle = fltr->dest_vsi_handle;
status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
if (status) {
if (status == -ENOENT)
......
This diff is collapsed.
......@@ -45,7 +45,20 @@ struct ice_indr_block_priv {
};
/* TC flower action descriptor for the ice driver.
 *
 * Holds the resolved destination of an offloaded TC filter. Which member
 * of @fwd is valid is selected by @fltr_act: the tc member for forward-to-VSI
 * (hw_tc) actions, the q member for forward-to-queue actions.
 *
 * Note: the legacy standalone tc_class member was replaced by fwd.tc.tc_class;
 * keeping both would leave a dead, never-read field in every filter.
 */
struct ice_tc_flower_action {
	/* forward action specific params */
	union {
		struct {
			u32 tc_class; /* forward to hw_tc */
			u32 rsvd;
		} tc;
		struct {
			u16 queue; /* forward to queue */
			/* To add filter in HW, absolute queue number in global
			 * space of queues (between 0...N) is needed
			 */
			u16 hw_queue;
		} q;
	} fwd;
	enum ice_sw_fwd_act_type fltr_act;
};
......@@ -131,11 +144,11 @@ struct ice_tc_flower_fltr {
*/
u16 rid;
u16 rule_id;
/* VSI handle of the destination VSI (it could be main PF VSI, CHNL_VSI,
 * VF VSI)
 */
u16 dest_id;
/* if dest_id is vsi_idx, then need to store destination VSI ptr */
u16 dest_vsi_handle;
/* ptr to destination VSI */
struct ice_vsi *dest_vsi;
/* direction of fltr for eswitch use case */
enum ice_eswitch_fltr_direction direction;
......@@ -162,12 +175,23 @@ struct ice_tc_flower_fltr {
* @f: Pointer to tc-flower filter
*
* Criteria to determine whether a given filter is a valid channel filter
* or not is based on its destination.
* For forward to VSI action, if destination is valid hw_tc (aka tc_class)
* and in supported range of TCs for ADQ, then return true.
* For forward to queue, as long as dest_vsi is valid and it is of type
* VSI_CHNL (PF ADQ VSI is of type VSI_CHNL), return true.
* NOTE: For forward to queue, correct dest_vsi is still set in tc_fltr based
* on destination queue specified.
*/
static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
{
	/* Forward-to-VSI: a channel (ADQ) filter iff the target hw_tc falls
	 * inside the supported ADQ traffic-class range.
	 */
	if (f->action.fltr_act == ICE_FWD_TO_VSI)
		return f->action.fwd.tc.tc_class >= ICE_CHNL_START_TC &&
		       f->action.fwd.tc.tc_class < ICE_CHNL_MAX_TC;

	/* Forward-to-queue: a channel filter iff the destination VSI was
	 * resolved and is a channel VSI (PF ADQ VSIs are ICE_VSI_CHNL).
	 */
	if (f->action.fltr_act == ICE_FWD_TO_Q)
		return f->dest_vsi && f->dest_vsi->type == ICE_VSI_CHNL;

	return false;
}
/**
......
......@@ -67,6 +67,7 @@ struct tc_action {
#define TCA_ACT_FLAGS_BIND (1U << (TCA_ACT_FLAGS_USER_BITS + 1))
#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
#define TCA_ACT_FLAGS_AT_INGRESS (1U << (TCA_ACT_FLAGS_USER_BITS + 4))
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
......
......@@ -155,6 +155,7 @@ enum flow_action_id {
FLOW_ACTION_MARK,
FLOW_ACTION_PTYPE,
FLOW_ACTION_PRIORITY,
FLOW_ACTION_RX_QUEUE_MAPPING,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
FLOW_ACTION_SAMPLE,
......@@ -247,6 +248,7 @@ struct flow_action_entry {
u32 csum_flags; /* FLOW_ACTION_CSUM */
u32 mark; /* FLOW_ACTION_MARK */
u16 ptype; /* FLOW_ACTION_PTYPE */
u16 rx_queue; /* FLOW_ACTION_RX_QUEUE_MAPPING */
u32 priority; /* FLOW_ACTION_PRIORITY */
struct { /* FLOW_ACTION_QUEUE */
u32 ctx;
......
......@@ -95,12 +95,41 @@ static inline u32 tcf_skbedit_priority(const struct tc_action *a)
return priority;
}
/* Fetch the skbedit action's queue_mapping value for the RX offload path.
 * The action's params live behind RCU, so the dereference must happen
 * inside an RCU read-side critical section; the value is copied out before
 * the section ends so the caller never touches RCU-protected memory.
 */
static inline u16 tcf_skbedit_rx_queue_mapping(const struct tc_action *a)
{
u16 rx_queue;
rcu_read_lock();
rx_queue = rcu_dereference(to_skbedit(a)->params)->queue_mapping;
rcu_read_unlock();
return rx_queue;
}
/* Return true iff action is queue_mapping */
static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a)
{
return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING);
}
/* Return true if the action flags mark it as attached on ingress (RX) */
static inline bool is_tcf_skbedit_ingress(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_AT_INGRESS) != 0;
}
static inline bool is_tcf_skbedit_tx_queue_mapping(const struct tc_action *a)
{
return is_tcf_skbedit_queue_mapping(a) &&
!is_tcf_skbedit_ingress(a->tcfa_flags);
}
static inline bool is_tcf_skbedit_rx_queue_mapping(const struct tc_action *a)
{
return is_tcf_skbedit_queue_mapping(a) &&
is_tcf_skbedit_ingress(a->tcfa_flags);
}
/* Return true iff action is inheritdsfield */
static inline bool is_tcf_skbedit_inheritdsfield(const struct tc_action *a)
{
......
......@@ -148,6 +148,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
}
if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
if (is_tcf_skbedit_ingress(act_flags) &&
!(act_flags & TCA_ACT_FLAGS_SKIP_SW)) {
NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw");
return -EOPNOTSUPP;
}
flags |= SKBEDIT_F_QUEUE_MAPPING;
queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
}
......@@ -374,9 +379,12 @@ static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data
} else if (is_tcf_skbedit_priority(act)) {
entry->id = FLOW_ACTION_PRIORITY;
entry->priority = tcf_skbedit_priority(act);
} else if (is_tcf_skbedit_queue_mapping(act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used");
} else if (is_tcf_skbedit_tx_queue_mapping(act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side");
return -EOPNOTSUPP;
} else if (is_tcf_skbedit_rx_queue_mapping(act)) {
entry->id = FLOW_ACTION_RX_QUEUE_MAPPING;
entry->rx_queue = tcf_skbedit_rx_queue_mapping(act);
} else if (is_tcf_skbedit_inheritdsfield(act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
return -EOPNOTSUPP;
......@@ -394,6 +402,8 @@ static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data
fl_action->id = FLOW_ACTION_PTYPE;
else if (is_tcf_skbedit_priority(act))
fl_action->id = FLOW_ACTION_PRIORITY;
else if (is_tcf_skbedit_rx_queue_mapping(act))
fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING;
else
return -EOPNOTSUPP;
}
......
......@@ -1953,6 +1953,11 @@ static void tfilter_put(struct tcf_proto *tp, void *fh)
tp->ops->put(tp, fh);
}
/* True when @classid's minor number names the ingress pseudo-qdisc */
static bool is_qdisc_ingress(__u32 classid)
{
	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
......@@ -2144,6 +2149,8 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
flags |= TCA_ACT_FLAGS_REPLACE;
if (!rtnl_held)
flags |= TCA_ACT_FLAGS_NO_RTNL;
if (is_qdisc_ingress(parent))
flags |= TCA_ACT_FLAGS_AT_INGRESS;
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
flags, extack);
if (err == 0) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment