Commit 7c3d310d authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter fixes for net:

1) Fix memleak reported by syzkaller when registering IPVS hooks,
   patch from Julian Anastasov.

2) Fix memory leak in start_sync_thread, also from Julian.

3) Fix conntrack deletion via ctnetlink, from Felix Kaechele.

4) Fix reject for ICMP due to incorrect checksum handling, from
   He Zhe.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 75dad252 5d154984
...@@ -808,11 +808,12 @@ struct ipvs_master_sync_state { ...@@ -808,11 +808,12 @@ struct ipvs_master_sync_state {
struct ip_vs_sync_buff *sync_buff; struct ip_vs_sync_buff *sync_buff;
unsigned long sync_queue_len; unsigned long sync_queue_len;
unsigned int sync_queue_delay; unsigned int sync_queue_delay;
struct task_struct *master_thread;
struct delayed_work master_wakeup_work; struct delayed_work master_wakeup_work;
struct netns_ipvs *ipvs; struct netns_ipvs *ipvs;
}; };
struct ip_vs_sync_thread_data;
/* How much time to keep dests in trash */ /* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ) #define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
...@@ -943,7 +944,8 @@ struct netns_ipvs { ...@@ -943,7 +944,8 @@ struct netns_ipvs {
spinlock_t sync_lock; spinlock_t sync_lock;
struct ipvs_master_sync_state *ms; struct ipvs_master_sync_state *ms;
spinlock_t sync_buff_lock; spinlock_t sync_buff_lock;
struct task_struct **backup_threads; struct ip_vs_sync_thread_data *master_tinfo;
struct ip_vs_sync_thread_data *backup_tinfo;
int threads_mask; int threads_mask;
volatile int sync_state; volatile int sync_state;
struct mutex sync_mutex; struct mutex sync_mutex;
......
...@@ -2245,7 +2245,6 @@ static const struct nf_hook_ops ip_vs_ops[] = { ...@@ -2245,7 +2245,6 @@ static const struct nf_hook_ops ip_vs_ops[] = {
static int __net_init __ip_vs_init(struct net *net) static int __net_init __ip_vs_init(struct net *net)
{ {
struct netns_ipvs *ipvs; struct netns_ipvs *ipvs;
int ret;
ipvs = net_generic(net, ip_vs_net_id); ipvs = net_generic(net, ip_vs_net_id);
if (ipvs == NULL) if (ipvs == NULL)
...@@ -2277,17 +2276,11 @@ static int __net_init __ip_vs_init(struct net *net) ...@@ -2277,17 +2276,11 @@ static int __net_init __ip_vs_init(struct net *net)
if (ip_vs_sync_net_init(ipvs) < 0) if (ip_vs_sync_net_init(ipvs) < 0)
goto sync_fail; goto sync_fail;
ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0)
goto hook_fail;
return 0; return 0;
/* /*
* Error handling * Error handling
*/ */
hook_fail:
ip_vs_sync_net_cleanup(ipvs);
sync_fail: sync_fail:
ip_vs_conn_net_cleanup(ipvs); ip_vs_conn_net_cleanup(ipvs);
conn_fail: conn_fail:
...@@ -2317,6 +2310,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net) ...@@ -2317,6 +2310,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
net->ipvs = NULL; net->ipvs = NULL;
} }
/* Per-netns init for the IPVS device-notifier pernet ops: register the
 * IPVS netfilter hooks for this namespace.  Per the commit message, hook
 * registration was moved out of __ip_vs_init() into this later pernet
 * stage to fix a memleak reported by syzkaller when registering hooks.
 * Returns 0 on success or the negative errno from
 * nf_register_net_hooks() on failure.
 */
static int __net_init __ip_vs_dev_init(struct net *net)
{
	int ret;

	ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
	if (ret < 0)
		return ret;

	return 0;
}
static void __net_exit __ip_vs_dev_cleanup(struct net *net) static void __net_exit __ip_vs_dev_cleanup(struct net *net)
{ {
struct netns_ipvs *ipvs = net_ipvs(net); struct netns_ipvs *ipvs = net_ipvs(net);
...@@ -2336,6 +2342,7 @@ static struct pernet_operations ipvs_core_ops = { ...@@ -2336,6 +2342,7 @@ static struct pernet_operations ipvs_core_ops = {
}; };
static struct pernet_operations ipvs_core_dev_ops = { static struct pernet_operations ipvs_core_dev_ops = {
.init = __ip_vs_dev_init,
.exit = __ip_vs_dev_cleanup, .exit = __ip_vs_dev_cleanup,
}; };
......
...@@ -2396,9 +2396,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) ...@@ -2396,9 +2396,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
cfg.syncid = dm->syncid; cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state); ret = start_sync_thread(ipvs, &cfg, dm->state);
} else { } else {
mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state); ret = stop_sync_thread(ipvs, dm->state);
mutex_unlock(&ipvs->sync_mutex);
} }
goto out_dec; goto out_dec;
} }
...@@ -3515,10 +3513,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs) ...@@ -3515,10 +3513,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
if (!attrs[IPVS_DAEMON_ATTR_STATE]) if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL; return -EINVAL;
mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, ret = stop_sync_thread(ipvs,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
mutex_unlock(&ipvs->sync_mutex);
return ret; return ret;
} }
......
...@@ -195,6 +195,7 @@ union ip_vs_sync_conn { ...@@ -195,6 +195,7 @@ union ip_vs_sync_conn {
#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
struct ip_vs_sync_thread_data { struct ip_vs_sync_thread_data {
struct task_struct *task;
struct netns_ipvs *ipvs; struct netns_ipvs *ipvs;
struct socket *sock; struct socket *sock;
char *buf; char *buf;
...@@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs, ...@@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
max(IPVS_SYNC_SEND_DELAY, 1)); max(IPVS_SYNC_SEND_DELAY, 1));
ms->sync_queue_len++; ms->sync_queue_len++;
list_add_tail(&sb->list, &ms->sync_queue); list_add_tail(&sb->list, &ms->sync_queue);
if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
wake_up_process(ms->master_thread); int id = (int)(ms - ipvs->ms);
wake_up_process(ipvs->master_tinfo[id].task);
}
} else } else
ip_vs_sync_buff_release(sb); ip_vs_sync_buff_release(sb);
spin_unlock(&ipvs->sync_lock); spin_unlock(&ipvs->sync_lock);
...@@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work) ...@@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
spin_lock_bh(&ipvs->sync_lock); spin_lock_bh(&ipvs->sync_lock);
if (ms->sync_queue_len && if (ms->sync_queue_len &&
ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
int id = (int)(ms - ipvs->ms);
ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
wake_up_process(ms->master_thread); wake_up_process(ipvs->master_tinfo[id].task);
} }
spin_unlock_bh(&ipvs->sync_lock); spin_unlock_bh(&ipvs->sync_lock);
} }
...@@ -1703,10 +1709,6 @@ static int sync_thread_master(void *data) ...@@ -1703,10 +1709,6 @@ static int sync_thread_master(void *data)
if (sb) if (sb)
ip_vs_sync_buff_release(sb); ip_vs_sync_buff_release(sb);
/* release the sending multicast socket */
sock_release(tinfo->sock);
kfree(tinfo);
return 0; return 0;
} }
...@@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data) ...@@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
} }
} }
/* release the sending multicast socket */
sock_release(tinfo->sock);
kfree(tinfo->buf);
kfree(tinfo);
return 0; return 0;
} }
...@@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data) ...@@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state) int state)
{ {
struct ip_vs_sync_thread_data *tinfo = NULL; struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
struct task_struct **array = NULL, *task; struct task_struct *task;
struct net_device *dev; struct net_device *dev;
char *name; char *name;
int (*threadfn)(void *data); int (*threadfn)(void *data);
...@@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ...@@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
threadfn = sync_thread_master; threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) { } else if (state == IP_VS_STATE_BACKUP) {
result = -EEXIST; result = -EEXIST;
if (ipvs->backup_threads) if (ipvs->backup_tinfo)
goto out_early; goto out_early;
ipvs->bcfg = *c; ipvs->bcfg = *c;
...@@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ...@@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
master_wakeup_work_handler); master_wakeup_work_handler);
ms->ipvs = ipvs; ms->ipvs = ipvs;
} }
} else {
array = kcalloc(count, sizeof(struct task_struct *),
GFP_KERNEL);
result = -ENOMEM;
if (!array)
goto out;
} }
result = -ENOMEM;
ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
GFP_KERNEL);
if (!ti)
goto out;
for (id = 0; id < count; id++) { for (id = 0; id < count; id++) {
result = -ENOMEM; tinfo = &ti[id];
tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
if (!tinfo)
goto out;
tinfo->ipvs = ipvs; tinfo->ipvs = ipvs;
tinfo->sock = NULL;
if (state == IP_VS_STATE_BACKUP) { if (state == IP_VS_STATE_BACKUP) {
result = -ENOMEM;
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL); GFP_KERNEL);
if (!tinfo->buf) if (!tinfo->buf)
goto out; goto out;
} else {
tinfo->buf = NULL;
} }
tinfo->id = id; tinfo->id = id;
if (state == IP_VS_STATE_MASTER) if (state == IP_VS_STATE_MASTER)
...@@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ...@@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
result = PTR_ERR(task); result = PTR_ERR(task);
goto out; goto out;
} }
tinfo = NULL; tinfo->task = task;
if (state == IP_VS_STATE_MASTER)
ipvs->ms[id].master_thread = task;
else
array[id] = task;
} }
/* mark as active */ /* mark as active */
if (state == IP_VS_STATE_BACKUP) if (state == IP_VS_STATE_MASTER)
ipvs->backup_threads = array; ipvs->master_tinfo = ti;
else
ipvs->backup_tinfo = ti;
spin_lock_bh(&ipvs->sync_buff_lock); spin_lock_bh(&ipvs->sync_buff_lock);
ipvs->sync_state |= state; ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock); spin_unlock_bh(&ipvs->sync_buff_lock);
...@@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ...@@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
out: out:
/* We do not need RTNL lock anymore, release it here so that /* We do not need RTNL lock anymore, release it here so that
* sock_release below and in the kthreads can use rtnl_lock * sock_release below can use rtnl_lock to leave the mcast group.
* to leave the mcast group.
*/ */
rtnl_unlock(); rtnl_unlock();
count = id; id = min(id, count - 1);
while (count-- > 0) { if (ti) {
if (state == IP_VS_STATE_MASTER) for (tinfo = ti + id; tinfo >= ti; tinfo--) {
kthread_stop(ipvs->ms[count].master_thread); if (tinfo->task)
else kthread_stop(tinfo->task);
kthread_stop(array[count]); }
} }
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms); kfree(ipvs->ms);
ipvs->ms = NULL; ipvs->ms = NULL;
} }
mutex_unlock(&ipvs->sync_mutex); mutex_unlock(&ipvs->sync_mutex);
if (tinfo) {
if (tinfo->sock) /* No more mutexes, release socks */
sock_release(tinfo->sock); if (ti) {
kfree(tinfo->buf); for (tinfo = ti + id; tinfo >= ti; tinfo--) {
kfree(tinfo); if (tinfo->sock)
sock_release(tinfo->sock);
kfree(tinfo->buf);
}
kfree(ti);
} }
kfree(array);
return result; return result;
out_early: out_early:
...@@ -1944,15 +1935,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ...@@ -1944,15 +1935,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int stop_sync_thread(struct netns_ipvs *ipvs, int state) int stop_sync_thread(struct netns_ipvs *ipvs, int state)
{ {
struct task_struct **array; struct ip_vs_sync_thread_data *ti, *tinfo;
int id; int id;
int retc = -EINVAL; int retc = -EINVAL;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
mutex_lock(&ipvs->sync_mutex);
if (state == IP_VS_STATE_MASTER) { if (state == IP_VS_STATE_MASTER) {
retc = -ESRCH;
if (!ipvs->ms) if (!ipvs->ms)
return -ESRCH; goto err;
ti = ipvs->master_tinfo;
/* /*
* The lock synchronizes with sb_queue_tail(), so that we don't * The lock synchronizes with sb_queue_tail(), so that we don't
...@@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state) ...@@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
struct ipvs_master_sync_state *ms = &ipvs->ms[id]; struct ipvs_master_sync_state *ms = &ipvs->ms[id];
int ret; int ret;
tinfo = &ti[id];
pr_info("stopping master sync thread %d ...\n", pr_info("stopping master sync thread %d ...\n",
task_pid_nr(ms->master_thread)); task_pid_nr(tinfo->task));
cancel_delayed_work_sync(&ms->master_wakeup_work); cancel_delayed_work_sync(&ms->master_wakeup_work);
ret = kthread_stop(ms->master_thread); ret = kthread_stop(tinfo->task);
if (retc >= 0) if (retc >= 0)
retc = ret; retc = ret;
} }
kfree(ipvs->ms); kfree(ipvs->ms);
ipvs->ms = NULL; ipvs->ms = NULL;
ipvs->master_tinfo = NULL;
} else if (state == IP_VS_STATE_BACKUP) { } else if (state == IP_VS_STATE_BACKUP) {
if (!ipvs->backup_threads) retc = -ESRCH;
return -ESRCH; if (!ipvs->backup_tinfo)
goto err;
ti = ipvs->backup_tinfo;
ipvs->sync_state &= ~IP_VS_STATE_BACKUP; ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
array = ipvs->backup_threads;
retc = 0; retc = 0;
for (id = ipvs->threads_mask; id >= 0; id--) { for (id = ipvs->threads_mask; id >= 0; id--) {
int ret; int ret;
tinfo = &ti[id];
pr_info("stopping backup sync thread %d ...\n", pr_info("stopping backup sync thread %d ...\n",
task_pid_nr(array[id])); task_pid_nr(tinfo->task));
ret = kthread_stop(array[id]); ret = kthread_stop(tinfo->task);
if (retc >= 0) if (retc >= 0)
retc = ret; retc = ret;
} }
kfree(array); ipvs->backup_tinfo = NULL;
ipvs->backup_threads = NULL; } else {
goto err;
} }
id = ipvs->threads_mask;
mutex_unlock(&ipvs->sync_mutex);
/* No more mutexes, release socks */
for (tinfo = ti + id; tinfo >= ti; tinfo--) {
if (tinfo->sock)
sock_release(tinfo->sock);
kfree(tinfo->buf);
}
kfree(ti);
/* decrease the module use count */ /* decrease the module use count */
ip_vs_use_count_dec(); ip_vs_use_count_dec();
return retc;
err:
mutex_unlock(&ipvs->sync_mutex);
return retc; return retc;
} }
...@@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs) ...@@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
{ {
int retc; int retc;
mutex_lock(&ipvs->sync_mutex);
retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER); retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH) if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n"); pr_err("Failed to stop Master Daemon\n");
...@@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs) ...@@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP); retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH) if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n"); pr_err("Failed to stop Backup Daemon\n");
mutex_unlock(&ipvs->sync_mutex);
} }
...@@ -1256,7 +1256,6 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl, ...@@ -1256,7 +1256,6 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple tuple;
struct nf_conn *ct; struct nf_conn *ct;
struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
struct nf_conntrack_zone zone; struct nf_conntrack_zone zone;
int err; int err;
...@@ -1266,11 +1265,13 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl, ...@@ -1266,11 +1265,13 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
if (cda[CTA_TUPLE_ORIG]) if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
u3, &zone); nfmsg->nfgen_family, &zone);
else if (cda[CTA_TUPLE_REPLY]) else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
u3, &zone); nfmsg->nfgen_family, &zone);
else { else {
u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
return ctnetlink_flush_conntrack(net, cda, return ctnetlink_flush_conntrack(net, cda,
NETLINK_CB(skb).portid, NETLINK_CB(skb).portid,
nlmsg_report(nlh), u3); nlmsg_report(nlh), u3);
......
...@@ -218,7 +218,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, ...@@ -218,7 +218,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
/* See ip_conntrack_proto_tcp.c */ /* See ip_conntrack_proto_tcp.c */
if (state->net->ct.sysctl_checksum && if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING && state->hook == NF_INET_PRE_ROUTING &&
nf_ip_checksum(skb, state->hook, dataoff, 0)) { nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
icmp_error_log(skb, state, "bad hw icmp checksum"); icmp_error_log(skb, state, "bad hw icmp checksum");
return -NF_ACCEPT; return -NF_ACCEPT;
} }
......
...@@ -564,7 +564,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, ...@@ -564,7 +564,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
return 0; return 0;
if (nf_ip_checksum(skb, hooknum, hdrlen, 0)) if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
return 0; return 0;
inside = (void *)skb->data + hdrlen; inside = (void *)skb->data + hdrlen;
......
...@@ -17,7 +17,8 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, ...@@ -17,7 +17,8 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
case CHECKSUM_COMPLETE: case CHECKSUM_COMPLETE:
if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
break; break;
if ((protocol == 0 && !csum_fold(skb->csum)) || if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
!csum_fold(skb->csum)) ||
!csum_tcpudp_magic(iph->saddr, iph->daddr, !csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - dataoff, protocol, skb->len - dataoff, protocol,
skb->csum)) { skb->csum)) {
...@@ -26,7 +27,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, ...@@ -26,7 +27,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
} }
/* fall through */ /* fall through */
case CHECKSUM_NONE: case CHECKSUM_NONE:
if (protocol == 0) if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
skb->csum = 0; skb->csum = 0;
else else
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment