Commit 92101b3b authored by David S. Miller

ipv4: Prepare for change of rt->rt_iif encoding.

Use inet_iif() consistently, and for TCP record the input interface of
cached RX dst in inet sock.

rt->rt_iif is going to be encoded differently, so that we can
legitimately cache input routes in the FIB info more aggressively.

When the input interface is "use SKB device index" the rt->rt_iif will
be set to zero.

This forces us to move the TCP RX dst cache installation into the ipv4
specific code, and as well it should since doing the route caching for
ipv6 is pointless at the moment since it is not inspected in the ipv6
input paths yet.

Also, remove the unlikely on dst->obsolete, all ipv4 dsts have
obsolete set to a non-zero value to force invocation of the check
callback.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fe3edf45
...@@ -172,6 +172,7 @@ struct inet_sock { ...@@ -172,6 +172,7 @@ struct inet_sock {
int uc_index; int uc_index;
int mc_index; int mc_index;
__be32 mc_addr; __be32 mc_addr;
int rx_dst_ifindex;
struct ip_mc_socklist __rcu *mc_list; struct ip_mc_socklist __rcu *mc_list;
struct inet_cork_full cork; struct inet_cork_full cork;
}; };
......
...@@ -481,7 +481,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, ...@@ -481,7 +481,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
struct rtable *rt; struct rtable *rt;
const struct iphdr *iph = ip_hdr(skb); const struct iphdr *iph = ip_hdr(skb);
struct flowi4 fl4 = { struct flowi4 fl4 = {
.flowi4_oif = skb_rtable(skb)->rt_iif, .flowi4_oif = inet_iif(skb),
.daddr = iph->saddr, .daddr = iph->saddr,
.saddr = iph->daddr, .saddr = iph->daddr,
.flowi4_tos = RT_CONN_FLAGS(sk), .flowi4_tos = RT_CONN_FLAGS(sk),
......
...@@ -571,7 +571,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) ...@@ -571,7 +571,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
rcu_read_lock(); rcu_read_lock();
if (rt_is_input_route(rt) && if (rt_is_input_route(rt) &&
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
dev = dev_get_by_index_rcu(net, rt->rt_iif); dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
if (dev) if (dev)
saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
......
...@@ -1027,10 +1027,9 @@ static int do_ip_setsockopt(struct sock *sk, int level, ...@@ -1027,10 +1027,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
void ipv4_pktinfo_prepare(struct sk_buff *skb) void ipv4_pktinfo_prepare(struct sk_buff *skb)
{ {
struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb); struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
const struct rtable *rt = skb_rtable(skb);
if (rt) { if (skb_rtable(skb)) {
pktinfo->ipi_ifindex = rt->rt_iif; pktinfo->ipi_ifindex = inet_iif(skb);
pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb); pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
} else { } else {
pktinfo->ipi_ifindex = 0; pktinfo->ipi_ifindex = 0;
......
...@@ -848,7 +848,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) ...@@ -848,7 +848,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
if (log_martians && if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number) peer->rate_tokens == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, rt->rt_iif, &ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &rt->rt_gateway); &ip_hdr(skb)->daddr, &rt->rt_gateway);
#endif #endif
} }
......
...@@ -5391,18 +5391,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -5391,18 +5391,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
if (sk->sk_rx_dst) {
struct dst_entry *dst = sk->sk_rx_dst;
if (unlikely(dst->obsolete)) {
if (dst->ops->check(dst, 0) == NULL) {
dst_release(dst);
sk->sk_rx_dst = NULL;
}
}
}
if (unlikely(sk->sk_rx_dst == NULL))
sk->sk_rx_dst = dst_clone(skb_dst(skb));
/* /*
* Header prediction. * Header prediction.
* The code loosely follows the one in the famous * The code loosely follows the one in the famous
......
...@@ -1618,6 +1618,20 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) ...@@ -1618,6 +1618,20 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sock_rps_save_rxhash(sk, skb); sock_rps_save_rxhash(sk, skb);
if (sk->sk_rx_dst) {
struct dst_entry *dst = sk->sk_rx_dst;
if (dst->ops->check(dst, 0) == NULL) {
dst_release(dst);
sk->sk_rx_dst = NULL;
}
}
if (unlikely(sk->sk_rx_dst == NULL)) {
struct inet_sock *icsk = inet_sk(sk);
struct rtable *rt = skb_rtable(skb);
sk->sk_rx_dst = dst_clone(&rt->dst);
icsk->rx_dst_ifindex = inet_iif(skb);
}
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk; rsk = sk;
goto reset; goto reset;
...@@ -1700,16 +1714,14 @@ void tcp_v4_early_demux(struct sk_buff *skb) ...@@ -1700,16 +1714,14 @@ void tcp_v4_early_demux(struct sk_buff *skb)
skb->destructor = sock_edemux; skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) { if (sk->sk_state != TCP_TIME_WAIT) {
struct dst_entry *dst = sk->sk_rx_dst; struct dst_entry *dst = sk->sk_rx_dst;
struct inet_sock *icsk = inet_sk(sk);
if (dst) if (dst)
dst = dst_check(dst, 0); dst = dst_check(dst, 0);
if (dst) { if (dst &&
struct rtable *rt = (struct rtable *) dst; icsk->rx_dst_ifindex == dev->ifindex)
if (rt->rt_iif == dev->ifindex)
skb_dst_set_noref(skb, dst); skb_dst_set_noref(skb, dst);
} }
} }
}
} }
/* /*
......
...@@ -143,7 +143,7 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, ...@@ -143,7 +143,7 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (head == NULL) if (head == NULL)
goto old_method; goto old_method;
iif = ((struct rtable *)dst)->rt_iif; iif = inet_iif(skb);
h = route4_fastmap_hash(id, iif); h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id && if (id == head->fastmap[h].id &&
......
...@@ -264,7 +264,7 @@ META_COLLECTOR(int_rtiif) ...@@ -264,7 +264,7 @@ META_COLLECTOR(int_rtiif)
if (unlikely(skb_rtable(skb) == NULL)) if (unlikely(skb_rtable(skb) == NULL))
*err = -1; *err = -1;
else else
dst->value = skb_rtable(skb)->rt_iif; dst->value = inet_iif(skb);
} }
/************************************************************************** /**************************************************************************
......
...@@ -568,7 +568,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk, ...@@ -568,7 +568,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
/* What interface did this skb arrive on? */ /* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb) static int sctp_v4_skb_iif(const struct sk_buff *skb)
{ {
return skb_rtable(skb)->rt_iif; return inet_iif(skb);
} }
/* Was this packet marked by Explicit Congestion Notification? */ /* Was this packet marked by Explicit Congestion Notification? */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment