Commit 404eb77e authored by Roopa Prabhu, committed by David S. Miller

ipv4: support sport, dport and ip_proto in RTM_GETROUTE

This is a follow-up to fib rules sport, dport and ipproto
match support. Only tcp, udp and icmp are supported for ipproto.
Used by the fib rule selftests.
Signed-off-by: Roopa Prabhu <roopa@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 273de02a
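For context, below is a minimal userspace sketch of the kind of RTM_GETROUTE request these new attributes enable; it is not part of this commit. It assumes uapi headers new enough to define RTA_IP_PROTO, RTA_SPORT and RTA_DPORT; the add_attr helper, the 8.8.8.8 destination and the port numbers are illustrative only, and parsing of the RTM_NEWROUTE reply is omitted.

/* Hedged sketch: build and send an RTM_GETROUTE request carrying
 * RTA_IP_PROTO, RTA_SPORT and RTA_DPORT so the kernel resolves the
 * route for a specific tcp/udp/icmp flow rather than a bare address.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Append one rtattr to the request and keep nlmsg_len in sync. */
static void add_attr(struct nlmsghdr *nlh, unsigned short type,
		     const void *data, int len)
{
	struct rtattr *rta;

	rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char buf[256];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct in_addr dst;
	__u8 ip_proto = IPPROTO_TCP;	/* tcp, udp or icmp only */
	__be16 sport = htons(12345);	/* example ports */
	__be16 dport = htons(80);
	int fd;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET;
	req.rtm.rtm_dst_len = 32;	/* full destination address follows */

	inet_pton(AF_INET, "8.8.8.8", &dst);	/* example destination */
	add_attr(&req.nlh, RTA_DST, &dst, sizeof(dst));
	add_attr(&req.nlh, RTA_IP_PROTO, &ip_proto, sizeof(ip_proto));
	add_attr(&req.nlh, RTA_SPORT, &sport, sizeof(sport));
	add_attr(&req.nlh, RTA_DPORT, &dport, sizeof(dport));

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || sendto(fd, &req, req.nlh.nlmsg_len, 0,
			     (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		perror("rtnetlink request");
		return 1;
	}
	/* The RTM_NEWROUTE reply would be read with recv() and walked with
	 * the NLMSG_* and RTA_* macros; that part is omitted for brevity.
	 */
	close(fd);
	return 0;
}

With a recent enough iproute2 the equivalent query can be issued from the command line along the lines of: ip route get 8.8.8.8 ipproto tcp sport 12345 dport 80.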
include/net/ip.h
@@ -664,4 +664,7 @@ extern int sysctl_icmp_msgs_burst;
int ip_misc_proc_init(void);
#endif
int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
struct netlink_ext_ack *extack);
#endif /* _IP_H */
include/uapi/linux/rtnetlink.h
@@ -327,6 +327,9 @@ enum rtattr_type_t {
RTA_PAD,
RTA_UID,
RTA_TTL_PROPAGATE,
RTA_IP_PROTO,
RTA_SPORT,
RTA_DPORT,
__RTA_MAX
};
......
net/ipv4/Makefile
@@ -14,7 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o \
metrics.o
metrics.o netlink.o
obj-$(CONFIG_BPFILTER) += bpfilter/
......
net/ipv4/fib_frontend.c
@@ -649,6 +649,9 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_UID] = { .type = NLA_U32 },
[RTA_MARK] = { .type = NLA_U32 },
[RTA_IP_PROTO] = { .type = NLA_U8 },
[RTA_SPORT] = { .type = NLA_U16 },
[RTA_DPORT] = { .type = NLA_U16 },
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
......
net/ipv4/netlink.c (new file)
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/ip.h>
int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
struct netlink_ext_ack *extack)
{
*ip_proto = nla_get_u8(attr);
switch (*ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_ICMP:
return 0;
default:
NL_SET_ERR_MSG(extack, "Unsupported ip proto");
return -EOPNOTSUPP;
}
}
EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
net/ipv4/route.c
@@ -2574,11 +2574,10 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
EXPORT_SYMBOL_GPL(ip_route_output_flow);
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
u32 seq)
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
struct rtable *rt, u32 table_id, struct flowi4 *fl4,
struct sk_buff *skb, u32 portid, u32 seq)
{
struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
@@ -2674,7 +2673,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
}
} else
#endif
if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
goto nla_put_failure;
}
@@ -2689,43 +2688,93 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
return -EMSGSIZE;
}
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
u8 ip_proto, __be16 sport,
__be16 dport)
{
struct sk_buff *skb;
struct iphdr *iph;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return NULL;
/* Reserve room for dummy headers, this skb can pass
* through good chunk of routing engine.
*/
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IP);
iph = skb_put(skb, sizeof(struct iphdr));
iph->protocol = ip_proto;
iph->saddr = src;
iph->daddr = dst;
iph->version = 0x4;
iph->frag_off = 0;
iph->ihl = 0x5;
skb_set_transport_header(skb, skb->len);
switch (iph->protocol) {
case IPPROTO_UDP: {
struct udphdr *udph;
udph = skb_put_zero(skb, sizeof(struct udphdr));
udph->source = sport;
udph->dest = dport;
udph->len = sizeof(struct udphdr);
udph->check = 0;
break;
}
case IPPROTO_TCP: {
struct tcphdr *tcph;
tcph = skb_put_zero(skb, sizeof(struct tcphdr));
tcph->source = sport;
tcph->dest = dport;
tcph->doff = sizeof(struct tcphdr) / 4;
tcph->rst = 1;
tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
src, dst, 0);
break;
}
case IPPROTO_ICMP: {
struct icmphdr *icmph;
icmph = skb_put_zero(skb, sizeof(struct icmphdr));
icmph->type = ICMP_ECHO;
icmph->code = 0;
}
}
return skb;
}
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
u32 table_id = RT_TABLE_MAIN;
__be16 sport = 0, dport = 0;
struct fib_result res = {};
u8 ip_proto = IPPROTO_UDP;
struct rtable *rt = NULL;
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi4 fl4;
__be32 dst = 0;
__be32 src = 0;
kuid_t uid;
u32 iif;
int err;
int mark;
struct sk_buff *skb;
u32 table_id = RT_TABLE_MAIN;
kuid_t uid;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
extack);
if (err < 0)
goto errout;
return err;
rtm = nlmsg_data(nlh);
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto errout;
}
/* Reserve room for dummy headers, this skb can pass
through good chunk of routing engine.
*/
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
@@ -2735,14 +2784,22 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
else
uid = (iif ? INVALID_UID : current_uid());
/* Bugfix: need to give ip_route_input enough of an IP header to
* not gag.
*/
ip_hdr(skb)->protocol = IPPROTO_UDP;
ip_hdr(skb)->saddr = src;
ip_hdr(skb)->daddr = dst;
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
&ip_proto, extack);
if (err)
return err;
}
if (tb[RTA_SPORT])
sport = nla_get_be16(tb[RTA_SPORT]);
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
if (tb[RTA_DPORT])
dport = nla_get_be16(tb[RTA_DPORT]);
skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
if (!skb)
return -ENOBUFS;
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = dst;
@@ -2751,6 +2808,11 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
if (sport)
fl4.fl4_sport = sport;
if (dport)
fl4.fl4_dport = dport;
fl4.flowi4_proto = ip_proto;
rcu_read_lock();
@@ -2760,10 +2822,10 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
dev = dev_get_by_index_rcu(net, iif);
if (!dev) {
err = -ENODEV;
goto errout_free;
goto errout_rcu;
}
skb->protocol = htons(ETH_P_IP);
fl4.flowi4_iif = iif; /* for rt_fill_info */
skb->dev = dev;
skb->mark = mark;
err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
@@ -2783,7 +2845,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
}
if (err)
goto errout_free;
goto errout_rcu;
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
@@ -2791,34 +2853,40 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
table_id = res.table ? res.table->tb_id : 0;
/* reset skb for netlink reply msg */
skb_trim(skb, 0);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb_reset_mac_header(skb);
if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
if (!res.fi) {
err = fib_props[res.type].error;
if (!err)
err = -EHOSTUNREACH;
goto errout_free;
goto errout_rcu;
}
err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
rt->rt_type, res.prefix, res.prefixlen,
fl4.flowi4_tos, res.fi, 0);
} else {
err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
}
if (err < 0)
goto errout_free;
goto errout_rcu;
rcu_read_unlock();
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
errout_free:
return err;
errout_rcu:
rcu_read_unlock();
kfree_skb(skb);
goto errout;
goto errout_free;
}
void ip_rt_multicast_event(struct in_device *in_dev)
......