Commit 1eb99af5 authored by Nicolas Dichtel, committed by David S. Miller

ipmr/ip6mr: allow to get unresolved cache via netlink

/proc/net/ip[6]_mr_cache allows one to get all MFC entries, even those still
sitting on the unresolved list (mfc[6]_unres_queue), but only the table
RT_TABLE_DEFAULT is displayed.
This patch adds parsing of the unresolved list when the dump is made via
rtnetlink, so that each table can be checked.

In IPv6, rtm_type is now set in ip6mr_fill_mroute(), because __ip6mr_fill_mroute()
does not set it for an unresolved MFC entry. In IPv4, this is already done.
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9a68ac72
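
For context, the dump path extended below is the rtnetlink RTM_GETROUTE dump
with rtm_family set to RTNL_FAMILY_IPMR (or RTNL_FAMILY_IP6MR for IPv6). The
following is a minimal userspace sketch, not part of this commit and with
error handling pared down, of how such a dump is requested and consumed:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
        struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
        struct {
                struct nlmsghdr nlh;
                struct rtmsg rtm;
        } req = {
                .nlh = {
                        .nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtmsg)),
                        .nlmsg_type  = RTM_GETROUTE,
                        .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
                },
                /* RTNL_FAMILY_IPMR selects the IPv4 multicast cache;
                 * RTNL_FAMILY_IP6MR would select the IPv6 one */
                .rtm = { .rtm_family = RTNL_FAMILY_IPMR },
        };
        char buf[8192];
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0 || sendto(fd, &req, req.nlh.nlmsg_len, 0,
                             (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
                return 1;

        for (;;) {
                ssize_t len = recv(fd, buf, sizeof(buf), 0);
                struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

                if (len <= 0)
                        return 1;
                for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
                        if (nlh->nlmsg_type == NLMSG_DONE) {
                                close(fd);
                                return 0;
                        }
                        /* each RTM_NEWROUTE here is one MFC entry; with
                         * this patch, unresolved entries are included */
                        printf("mfc entry: %u bytes\n", nlh->nlmsg_len);
                }
        }
}
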
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2154,6 +2154,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 {
 	struct nlmsghdr *nlh;
 	struct rtmsg *rtm;
+	int err;
 
 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
 	if (nlh == NULL)
@@ -2178,7 +2179,9 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
 	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
 		goto nla_put_failure;
-	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
+	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
+	/* do not break the dump if cache is unresolved */
+	if (err < 0 && err != -ENOENT)
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
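
The -ENOENT special case above comes from __ipmr_fill_mroute(), which is not
shown in this diff; as far as the contemporary code goes (a recollection,
shown only for context), it bails out early on unresolved entries roughly
like this:

	/* an unresolved cache entry has no valid parent VIF yet, so the
	 * helper gives up before filling RTA_IIF/RTA_MULTIPATH */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;
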
@@ -2221,6 +2224,22 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 			}
 			e = s_e = 0;
 		}
+		spin_lock_bh(&mfc_unres_lock);
+		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+			if (e < s_e)
+				goto next_entry2;
+			if (ipmr_fill_mroute(mrt, skb,
+					     NETLINK_CB(cb->skb).portid,
+					     cb->nlh->nlmsg_seq,
+					     mfc) < 0) {
+				spin_unlock_bh(&mfc_unres_lock);
+				goto done;
+			}
+next_entry2:
+			e++;
+		}
+		spin_unlock_bh(&mfc_unres_lock);
+		e = s_e = 0;
 		s_h = 0;
 next_table:
 		t++;
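
The s_e/e bookkeeping in the new loop pairs with the function's existing
done: exit path, unchanged by this patch, which roughly (again a
recollection, for context) stashes the iteration state for the next dump
call:

done:
	rcu_read_unlock();
	/* save table index, hash bucket and entry count so the next
	 * netlink dump invocation can resume where this one stopped */
	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;
	return skb->len;
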
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2235,6 +2235,7 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 {
 	struct nlmsghdr *nlh;
 	struct rtmsg *rtm;
+	int err;
 
 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
 	if (nlh == NULL)
@@ -2248,6 +2249,7 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	rtm->rtm_table = mrt->id;
 	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
 		goto nla_put_failure;
+	rtm->rtm_type = RTN_MULTICAST;
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	if (c->mfc_flags & MFC_STATIC)
 		rtm->rtm_protocol = RTPROT_STATIC;
@@ -2258,7 +2260,9 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
 	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
 		goto nla_put_failure;
-	if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
+	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
+	/* do not break the dump if cache is unresolved */
+	if (err < 0 && err != -ENOENT)
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
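
The rtm_type assignment added in the second hunk above exists because
__ip6mr_fill_mroute() only sets it at the very end, on the resolved path;
roughly (a recollection of the code of that era, for context):

	/* inside __ip6mr_fill_mroute() */
	if (c->mf6c_parent >= MAXMIFS)	/* unresolved: bail out early */
		return -ENOENT;
	/* fill RTA_IIF and RTA_MULTIPATH here */
	rtm->rtm_type = RTN_MULTICAST;	/* only reached when resolved */
	return 1;
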
@@ -2301,6 +2305,22 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 			}
 			e = s_e = 0;
 		}
+		spin_lock_bh(&mfc_unres_lock);
+		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
+			if (e < s_e)
+				goto next_entry2;
+			if (ip6mr_fill_mroute(mrt, skb,
+					      NETLINK_CB(cb->skb).portid,
+					      cb->nlh->nlmsg_seq,
+					      mfc) < 0) {
+				spin_unlock_bh(&mfc_unres_lock);
+				goto done;
+			}
+next_entry2:
+			e++;
+		}
+		spin_unlock_bh(&mfc_unres_lock);
+		e = s_e = 0;
 		s_h = 0;
 next_table:
 		t++;
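
With both dump functions updated, a per-table rtnetlink dump, for example via
iproute2 ("ip mroute show table N" and "ip -6 mroute show table N", assuming
an iproute2 version that accepts a table argument for mroutes), should now
list entries still waiting in the unresolved queue as well as fully resolved
ones.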