Commit 5863702a authored by Eric Dumazet, committed by Patrick McHardy

netfilter: nfnetlink_queue: assert monotonic packet ids

Packet identifier is currently set up in nfqnl_build_packet_message(),
using one atomic_inc_return().

Problem is that since several CPUs might concurrently call
nfqnl_enqueue_packet() for the same queue, we can deliver packets to
the consumer in a non-monotonic way (packet N+1 being delivered before
packet N).

This patch moves the packet id setup from nfqnl_build_packet_message()
to nfqnl_enqueue_packet() to guarantee correct delivery order.

This also removes one atomic operation.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Florian Westphal <fw@strlen.de>
CC: Pablo Neira Ayuso <pablo@netfilter.org>
CC: Eric Leblond <eric@regit.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
parent 84a797dd
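
For context, here is a simplified sketch of the enqueue path after this patch. It is not the exact kernel code: error handling, queue-length accounting and failure paths are abbreviated, and only the ordering-relevant steps are shown. The point is that the id is now assigned under queue->lock, in the same critical section that delivers the message and links the entry onto queue_list, and it is written back into the already-built netlink message through the pointer returned by nfqnl_build_packet_message():

    /* Simplified sketch, not the actual kernel function. */
    static int nfqnl_enqueue_packet(struct nf_queue_entry *entry,
                                    unsigned int queuenum)
    {
            struct nfqnl_instance *queue = instance_lookup(queuenum);
            struct sk_buff *nskb;
            __be32 *packet_id_ptr;

            /* Build the message first; the packet id field is left unset
             * and its location inside nskb is handed back via
             * packet_id_ptr. */
            nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
            if (nskb == NULL)
                    return -ENOMEM;

            spin_lock_bh(&queue->lock);

            /* Id assignment, message delivery and enqueue all happen
             * under queue->lock, so ids reach the consumer in
             * increasing order. */
            entry->id = ++queue->id_sequence;
            *packet_id_ptr = htonl(entry->id);

            nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
            __enqueue_entry(queue, entry);

            spin_unlock_bh(&queue->lock);
            return 0;
    }

Compared with the old scheme, where atomic_inc_return() handed out the id in nfqnl_build_packet_message() before the queue lock was taken, two CPUs can no longer obtain ids in one order and then enqueue/deliver in the opposite order.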
@@ -58,7 +58,7 @@ struct nfqnl_instance {
  */
 	spinlock_t	lock;
 	unsigned int	queue_total;
-	atomic_t	id_sequence;		/* 'sequence' of pkt ids */
+	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
 	struct list_head queue_list;		/* packets in queue */
 };
 
@@ -213,13 +213,15 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 
 static struct sk_buff *
 nfqnl_build_packet_message(struct nfqnl_instance *queue,
-			   struct nf_queue_entry *entry)
+			   struct nf_queue_entry *entry,
+			   __be32 **packet_id_ptr)
 {
 	sk_buff_data_t old_tail;
 	size_t size;
 	size_t data_len = 0;
 	struct sk_buff *skb;
-	struct nfqnl_msg_packet_hdr pmsg;
+	struct nlattr *nla;
+	struct nfqnl_msg_packet_hdr *pmsg;
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
 	struct sk_buff *entskb = entry->skb;
@@ -272,12 +274,11 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = htons(queue->queue_num);
 
-	entry->id = atomic_inc_return(&queue->id_sequence);
-	pmsg.packet_id		= htonl(entry->id);
-	pmsg.hw_protocol	= entskb->protocol;
-	pmsg.hook		= entry->hook;
-
-	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);
+	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
+	pmsg = nla_data(nla);
+	pmsg->hw_protocol	= entskb->protocol;
+	pmsg->hook		= entry->hook;
+	*packet_id_ptr = &pmsg->packet_id;
 
 	indev = entry->indev;
 	if (indev) {
@@ -388,6 +389,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	struct sk_buff *nskb;
 	struct nfqnl_instance *queue;
 	int err = -ENOBUFS;
+	__be32 *packet_id_ptr;
 
 	/* rcu_read_lock()ed by nf_hook_slow() */
 	queue = instance_lookup(queuenum);
@@ -401,7 +403,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 		goto err_out;
 	}
 
-	nskb = nfqnl_build_packet_message(queue, entry);
+	nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
 	if (nskb == NULL) {
 		err = -ENOMEM;
 		goto err_out;
@@ -420,6 +422,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 			 queue->queue_total);
 		goto err_out_free_nskb;
 	}
+	entry->id = ++queue->id_sequence;
+	*packet_id_ptr = htonl(entry->id);
 
 	/* nfnetlink_unicast will either free the nskb or add it to a socket */
 	err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
@@ -852,7 +856,7 @@ static int seq_show(struct seq_file *s, void *v)
 			  inst->peer_pid, inst->queue_total,
 			  inst->copy_mode, inst->copy_range,
 			  inst->queue_dropped, inst->queue_user_dropped,
-			  atomic_read(&inst->id_sequence), 1);
+			  inst->id_sequence, 1);
 }
 
 static const struct seq_operations nfqnl_seq_ops = {