Commit dfde331e authored by GhantaKrishnamurthy MohanKrishna's avatar GhantaKrishnamurthy MohanKrishna Committed by David S. Miller

tipc: modify socket iterator for sock_diag

The current socket iterator function tipc_nl_sk_dump, handles socket
locks and calls __tipc_nl_add_sk for each socket.
To reuse this logic in sock_diag implementation, we do minor
modifications to make these functions generic as described below.

In this commit, we add two new functions __tipc_nl_sk_walk,
__tipc_nl_add_sk_info and modify tipc_nl_sk_dump, __tipc_nl_add_sk
accordingly.

In __tipc_nl_sk_walk we:
1. acquire and release socket locks
2. for each socket, execute the specified callback function

In __tipc_nl_add_sk we:
- Move the netlink attribute insertion to __tipc_nl_add_sk_info.

tipc_nl_sk_dump calls __tipc_nl_sk_walk with __tipc_nl_add_sk as argument.

sock_diag will use these generic functions in a later commit.

There is no functional change in this commit.
Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: GhantaKrishnamurthy MohanKrishna <mohan.krishna.ghanta.krishnamurthy@ericsson.com>
Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 334e7678
@@ -3160,16 +3160,33 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
 	return -EMSGSIZE;
 }
/* Fill in the per-socket attributes shared by all socket dump records:
 * the socket reference, the own node address, and either connection
 * details (for a connected socket) or a publication flag (for a socket
 * with bound names).
 *
 * Returns 0 on success, -EMSGSIZE when @skb has no room left.
 */
static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock *tsk)
{
	struct tipc_net *tn = tipc_net(sock_net(skb->sk));
	struct sock *sk = &tsk->sk;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		return -EMSGSIZE;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		return -EMSGSIZE;

	if (tipc_sk_connected(sk))
		return __tipc_nl_add_sk_con(skb, tsk) ? -EMSGSIZE : 0;

	if (!list_empty(&tsk->publications) &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
		return -EMSGSIZE;

	return 0;
}
 /* Caller should hold socket lock for the passed tipc socket. */
 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct tipc_sock *tsk)
 {
-	int err;
-	void *hdr;
 	struct nlattr *attrs;
-	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct sock *sk = &tsk->sk;
+	void *hdr;
 
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
@@ -3179,19 +3196,10 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
 	if (!attrs)
 		goto genlmsg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
-		goto attr_msg_cancel;
-	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
-		goto attr_msg_cancel;
-	if (tipc_sk_connected(sk)) {
-		err = __tipc_nl_add_sk_con(skb, tsk);
-		if (err)
-			goto attr_msg_cancel;
-	} else if (!list_empty(&tsk->publications)) {
-		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
-			goto attr_msg_cancel;
-	}
+
+	if (__tipc_nl_add_sk_info(skb, tsk))
+		goto attr_msg_cancel;
 
 	nla_nest_end(skb, attrs);
 	genlmsg_end(skb, hdr);
@@ -3205,16 +3213,19 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
 	return -EMSGSIZE;
 }
-int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+static int __tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+			     int (*skb_handler)(struct sk_buff *skb,
+						struct netlink_callback *cb,
+						struct tipc_sock *tsk))
 {
-	int err;
-	struct tipc_sock *tsk;
-	const struct bucket_table *tbl;
-	struct rhash_head *pos;
 	struct net *net = sock_net(skb->sk);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	u32 tbl_id = cb->args[0];
+	struct tipc_net *tn = tipc_net(net);
+	const struct bucket_table *tbl;
 	u32 prev_portid = cb->args[1];
+	u32 tbl_id = cb->args[0];
+	struct rhash_head *pos;
+	struct tipc_sock *tsk;
+	int err;
 
 	rcu_read_lock();
 	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
@@ -3226,12 +3237,13 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
 				continue;
 			}
 
-			err = __tipc_nl_add_sk(skb, cb, tsk);
+			err = skb_handler(skb, cb, tsk);
 			if (err) {
 				prev_portid = tsk->portid;
 				spin_unlock_bh(&tsk->sk.sk_lock.slock);
 				goto out;
 			}
+
 			prev_portid = 0;
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
@@ -3244,6 +3256,11 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
/* Netlink dump handler for TIPC_NL_SOCK_GET: iterates every TIPC socket
 * via the generic walker, emitting one record per socket with
 * __tipc_nl_add_sk() as the per-socket callback.
 */
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
return __tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
}
/* Caller should hold socket lock for the passed tipc socket. */ /* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb, static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
struct netlink_callback *cb, struct netlink_callback *cb,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment