Commit 8dc07fdb authored by Willem de Bruijn, committed by David S. Miller

net-tc: convert tc_at to tc_at_ingress

Field tc_at is used only within tc actions to distinguish ingress from
egress processing. A single bit is sufficient for this purpose.
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a5135bcf
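
For readers skimming the diff below, a minimal userspace sketch (not kernel code) of the idea behind the conversion: the old two-bit tc_at field only ever answered "is this skb being processed at ingress?", so a one-bit flag carries the same information. The struct layouts and the AT_INGRESS/AT_EGRESS values are illustrative assumptions modeled on the uapi headers of this era, not the real sk_buff definition.

/* Standalone sketch: two-bit direction field vs. one-bit ingress flag.
 * AT_* values assumed to be 0x1/0x2 as in the uapi headers of this era.
 */
#include <stdbool.h>
#include <stdio.h>

#define AT_INGRESS 0x1
#define AT_EGRESS  0x2

struct old_skb {
	unsigned char tc_at:2;		/* holds AT_INGRESS or AT_EGRESS */
};

struct new_skb {
	unsigned char tc_at_ingress:1;	/* 1 at ingress, 0 at egress */
};

/* Old predicate: mask test on the two-bit field. */
static bool old_at_ingress(const struct old_skb *skb)
{
	return skb->tc_at & AT_INGRESS;
}

/* New predicate: the bit itself is the answer. */
static bool new_at_ingress(const struct new_skb *skb)
{
	return skb->tc_at_ingress;
}

int main(void)
{
	struct old_skb o = { .tc_at = AT_INGRESS };
	struct new_skb n = { .tc_at_ingress = 1 };

	/* Both encodings answer the only question ever asked of them... */
	printf("ingress: old=%d new=%d\n", old_at_ingress(&o), new_at_ingress(&n));

	/* ...for egress as well, so one bit suffices. */
	o.tc_at = AT_EGRESS;
	n.tc_at_ingress = 0;
	printf("egress:  old=%d new=%d\n", old_at_ingress(&o), new_at_ingress(&n));
	return 0;
}
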
@@ -590,6 +590,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
  *	@fclone: skbuff clone status
  *	@ipvs_property: skbuff is owned by ipvs
  *	@tc_skip_classify: do not classify packet. set by IFB device
+ *	@tc_at_ingress: used within tc_classify to distinguish in/egress
  *	@peeked: this packet has been seen already, so stats have been
  *		done for it, don't do them again
  *	@nf_trace: netfilter packet trace flag
@@ -751,7 +752,7 @@ struct sk_buff {
 #endif
 #ifdef CONFIG_NET_CLS_ACT
 	__u8			tc_skip_classify:1;
-	__u8			tc_at:2;
+	__u8			tc_at_ingress:1;
 	__u8			tc_from:2;
 #endif
@@ -412,7 +412,6 @@ int skb_do_redirect(struct sk_buff *);
 static inline void skb_reset_tc(struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	skb->tc_at = 0;
 	skb->tc_from = 0;
 #endif
 }
@@ -420,7 +419,7 @@ static inline void skb_reset_tc(struct sk_buff *skb)
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	return skb->tc_at & AT_INGRESS;
+	return skb->tc_at_ingress;
 #else
 	return false;
 #endif
@@ -3153,9 +3153,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
 	if (!cl)
 		return skb;

-	/* skb->tc_at and qdisc_skb_cb(skb)->pkt_len were already set
-	 * earlier by the caller.
-	 */
+	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
 	qdisc_bstats_cpu_update(cl->q, skb);

 	switch (tc_classify(skb, cl, &cl_res, false)) {
@@ -3320,7 +3318,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	qdisc_pkt_len_init(skb);
 #ifdef CONFIG_NET_CLS_ACT
-	skb->tc_at = AT_EGRESS;
+	skb->tc_at_ingress = 0;
 # ifdef CONFIG_NET_EGRESS
 	if (static_key_false(&egress_needed)) {
 		skb = sch_handle_egress(skb, &rc, dev);
@@ -3920,7 +3918,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
 	}

 	qdisc_skb_cb(skb)->pkt_len = skb->len;
-	skb->tc_at = AT_INGRESS;
+	skb->tc_at_ingress = 1;
 	qdisc_bstats_cpu_update(cl->q, skb);

 	switch (tc_classify(skb, cl, &cl_res, false)) {
@@ -39,15 +39,15 @@ static bool tcf_mirred_is_act_redirect(int action)
 	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
 }

-static u32 tcf_mirred_act_direction(int action)
+static bool tcf_mirred_act_wants_ingress(int action)
 {
 	switch (action) {
 	case TCA_EGRESS_REDIR:
 	case TCA_EGRESS_MIRROR:
-		return AT_EGRESS;
+		return false;
 	case TCA_INGRESS_REDIR:
 	case TCA_INGRESS_MIRROR:
-		return AT_INGRESS;
+		return true;
 	default:
 		BUG();
 	}
@@ -198,7 +198,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 	 * and devices expect a mac header on xmit, then mac push/pull is
 	 * needed.
 	 */
-	if (skb->tc_at != tcf_mirred_act_direction(m_eaction) &&
+	if (skb_at_tc_ingress(skb) != tcf_mirred_act_wants_ingress(m_eaction) &&
 	    m_mac_header_xmit) {
 		if (!skb_at_tc_ingress(skb)) {
 			/* caught at egress, act ingress: pull mac */
@@ -212,11 +212,11 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 	/* mirror is always swallowed */
 	if (tcf_mirred_is_act_redirect(m_eaction))
-		skb2->tc_from = skb2->tc_at;
+		skb2->tc_from = skb_at_tc_ingress(skb) ? AT_INGRESS : AT_EGRESS;

 	skb2->skb_iif = skb->dev->ifindex;
 	skb2->dev = dev;
-	if (tcf_mirred_act_direction(m_eaction) & AT_EGRESS)
+	if (!tcf_mirred_act_wants_ingress(m_eaction))
 		err = dev_queue_xmit(skb2);
 	else
 		err = netif_receive_skb(skb2);
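
A quick equivalence check on the act_mirred dispatch change, as a standalone userspace sketch (not kernel code): for each of the four mirred actions, the new boolean predicate selects dev_queue_xmit() exactly when the old direction mask had AT_EGRESS set. The TCA_* and AT_* constants are assumed from the uapi headers of this era, and the helper bodies are simplified stand-ins for the functions in the diff.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define AT_INGRESS		0x1
#define AT_EGRESS		0x2

#define TCA_EGRESS_REDIR	1
#define TCA_EGRESS_MIRROR	2
#define TCA_INGRESS_REDIR	3
#define TCA_INGRESS_MIRROR	4

/* Simplified stand-in for the old helper: returns a direction mask. */
static unsigned int act_direction(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return AT_EGRESS;
	default:
		return AT_INGRESS;
	}
}

/* Simplified stand-in for the new helper: true when the action targets ingress. */
static bool act_wants_ingress(int action)
{
	return action == TCA_INGRESS_REDIR || action == TCA_INGRESS_MIRROR;
}

int main(void)
{
	const int actions[] = { TCA_EGRESS_REDIR, TCA_EGRESS_MIRROR,
				TCA_INGRESS_REDIR, TCA_INGRESS_MIRROR };

	for (unsigned int i = 0; i < 4; i++) {
		int a = actions[i];
		/* Old dispatch: xmit when the egress bit is set.
		 * New dispatch: xmit when the action does not want ingress.
		 */
		bool old_xmit = act_direction(a) & AT_EGRESS;
		bool new_xmit = !act_wants_ingress(a);

		assert(old_xmit == new_xmit);
		printf("action %d: xmit=%d\n", a, new_xmit);
	}
	return 0;
}
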