Commit 324318f0 authored by Willem de Bruijn, committed by Pablo Neira Ayuso

netfilter: xtables: zero padding in data_to_user

When looking up an iptables rule, the iptables binary compares the
aligned match and target data (XT_ALIGN). In some cases the aligned
length exceeds the actual data size, so the comparison also covers
padding bytes.
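
For illustration only (not part of the commit), a minimal userspace-style
sketch of such a comparison over the XT_ALIGN()-rounded size. ALIGN_TO_8()
stands in for XT_ALIGN(), and struct icmp_match_data is an assumed
stand-in for the icmp match payload:

  #include <stdint.h>
  #include <string.h>

  /* simplified stand-in for XT_ALIGN(): round a size up to 8 bytes */
  #define ALIGN_TO_8(s) (((s) + 7UL) & ~7UL)

  struct icmp_match_data {        /* 4 bytes of real match data */
          uint8_t type;
          uint8_t code[2];
          uint8_t invflags;
  };

  /* the lookup compares aligned data, so with 8-byte alignment the
   * 4 trailing padding bytes after the struct take part in the memcmp */
  static int match_data_equal(const void *a, const void *b, size_t datasize)
  {
          return memcmp(a, b, ALIGN_TO_8(datasize)) == 0;
  }

With 8-byte alignment, ALIGN_TO_8(4) is 8, so four bytes that were never
written by the kernel are included in the comparison.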

Before commit f77bc5b2 ("iptables: use match, target and data
copy_to_user helpers") the malloc()ed bytes were overwritten by the
kernel with kzalloced contents, zeroing the padding and making the
comparison succeed. After that commit, the kernel copies and clears
only the data, leaving the padding bytes undefined.

Extend the clear operation from data size to aligned data size to
include the padding bytes, if any.
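
As a rough numeric illustration of the effect (the sizes are assumptions
for illustration, taken from the icmp match on a 64-bit kernel, not from
the patch itself):

  #include <stdio.h>

  int main(void)
  {
          /* assumed: usersize == size == 4, XT_ALIGN(4) == 8 */
          int usersize = 4, size = 4, aligned_size = 8;

          printf("old: clears %d byte(s)\n",
                 usersize != size ? size - usersize : 0);              /* 0 */
          printf("new: clears %d byte(s)\n",
                 usersize != aligned_size ? aligned_size - usersize : 0); /* 4 */
          return 0;
  }

With those values the old code clears nothing, leaving the padding
undefined, while the new code clears the four padding bytes.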

Padding bytes can be observed in both match and target. The bug can be
triggered by issuing a rule with match icmp and target ACCEPT and then
deleting it:

  iptables -t mangle -A INPUT -i lo -p icmp --icmp-type 1 -j ACCEPT
  iptables -t mangle -D INPUT -i lo -p icmp --icmp-type 1 -j ACCEPT

Fixes: f77bc5b2 ("iptables: use match, target and data copy_to_user helpers")
Reported-by: Paul Moore <pmoore@redhat.com>
Reported-by: Richard Guy Briggs <rgb@redhat.com>
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent ff1e4300
@@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m,
 int xt_target_to_user(const struct xt_entry_target *t,
                       struct xt_entry_target __user *u);
 int xt_data_to_user(void __user *dst, const void *src,
-                    int usersize, int size);
+                    int usersize, int size, int aligned_size);
 void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                                  struct xt_counters_info *info, bool compat);
@@ -1373,7 +1373,8 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name,
         strlcpy(name, _name, sizeof(name));
         if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) ||
             put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) ||
-            xt_data_to_user(um + entrysize, data, usersize, datasize))
+            xt_data_to_user(um + entrysize, data, usersize, datasize,
+                            XT_ALIGN(datasize)))
                 return -EFAULT;
         return 0;
@@ -1658,7 +1659,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
                 if (match->compat_to_user(cm->data, m->data))
                         return -EFAULT;
         } else {
-                if (xt_data_to_user(cm->data, m->data, match->usersize, msize))
+                if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
+                                    COMPAT_XT_ALIGN(msize)))
                         return -EFAULT;
         }
@@ -1687,7 +1689,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
                 if (target->compat_to_user(cm->data, t->data))
                         return -EFAULT;
         } else {
-                if (xt_data_to_user(cm->data, t->data, target->usersize, tsize))
+                if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
+                                    COMPAT_XT_ALIGN(tsize)))
                         return -EFAULT;
         }
@@ -283,12 +283,13 @@ static int xt_obj_to_user(u16 __user *psize, u16 size,
          &U->u.user.revision, K->u.kernel.TYPE->revision)
 int xt_data_to_user(void __user *dst, const void *src,
-                    int usersize, int size)
+                    int usersize, int size, int aligned_size)
 {
         usersize = usersize ? : size;
         if (copy_to_user(dst, src, usersize))
                 return -EFAULT;
-        if (usersize != size && clear_user(dst + usersize, size - usersize))
+        if (usersize != aligned_size &&
+            clear_user(dst + usersize, aligned_size - usersize))
                 return -EFAULT;
         return 0;
@@ -298,7 +299,9 @@ EXPORT_SYMBOL_GPL(xt_data_to_user);
 #define XT_DATA_TO_USER(U, K, TYPE, C_SIZE)                             \
         xt_data_to_user(U->data, K->data,                               \
                         K->u.kernel.TYPE->usersize,                     \
-                        C_SIZE ? : K->u.kernel.TYPE->TYPE##size)
+                        C_SIZE ? : K->u.kernel.TYPE->TYPE##size,        \
+                        C_SIZE ? COMPAT_XT_ALIGN(C_SIZE) :              \
+                                 XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
 int xt_match_to_user(const struct xt_entry_match *m,
                      struct xt_entry_match __user *u)