Commit 7f495ad9 authored by Ian Morris, committed by Pablo Neira Ayuso

netfilter-bridge: use netdev style comments

Changes comments to use netdev style.

No changes detected by objdiff.
Signed-off-by: Ian Morris <ipm@chirality.org.uk>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 052a4bc4
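
For context, and not part of the patch itself: "netdev style" refers to the multi-line comment convention preferred in net/ and drivers/net/, where the comment text starts on the opening /* line and the closing */ sits alone on its own line, in contrast to the general kernel style that leaves the opening line bare. A minimal C illustration of the two forms (illustration only, not taken from the patch):

/* netdev style (preferred in net/ and drivers/net/): the text begins
 * on the opening line and the terminator gets a line of its own.
 */

/*
 * General kernel style: the opening line carries no text.
 */

The diff below converts the bridge netfilter comments to the first form.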
net/bridge/netfilter/ebt_log.c

@@ -152,7 +152,8 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
  ntohs(ah->ar_op));
  /* If it's for Ethernet and the lengths are OK,
- * then log the ARP payload */
+ * then log the ARP payload
+ */
  if (ah->ar_hrd == htons(1) &&
      ah->ar_hln == ETH_ALEN &&
      ah->ar_pln == sizeof(__be32)) {
net/bridge/netfilter/ebt_vlan.c

@@ -66,7 +66,8 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
  * - Canonical Format Indicator (CFI). The Canonical Format Indicator
  * (CFI) is a single bit flag value. Currently ignored.
  * - VLAN Identifier (VID). The VID is encoded as
- * an unsigned binary number. */
+ * an unsigned binary number.
+ */
  id = TCI & VLAN_VID_MASK;
  prio = (TCI >> 13) & 0x7;

@@ -98,7 +99,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
  }
  /* Check for bitmask range
- * True if even one bit is out of mask */
+ * True if even one bit is out of mask
+ */
  if (info->bitmask & ~EBT_VLAN_MASK) {
  pr_debug("bitmask %2X is out of mask (%2X)\n",
  info->bitmask, EBT_VLAN_MASK);

@@ -117,7 +119,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
  * 0 - The null VLAN ID.
  * 1 - The default Port VID (PVID)
  * 0x0FFF - Reserved for implementation use.
- * if_vlan.h: VLAN_N_VID 4096. */
+ * if_vlan.h: VLAN_N_VID 4096.
+ */
  if (GET_BITMASK(EBT_VLAN_ID)) {
  if (!!info->id) { /* if id!=0 => check vid range */
  if (info->id > VLAN_N_VID) {

@@ -128,7 +131,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
  /* Note: This is valid VLAN-tagged frame point.
  * Any value of user_priority are acceptable,
  * but should be ignored according to 802.1Q Std.
- * So we just drop the prio flag. */
+ * So we just drop the prio flag.
+ */
  info->bitmask &= ~EBT_VLAN_PRIO;
  }
  /* Else, id=0 (null VLAN ID) => user_priority range (any?) */

@@ -143,7 +147,8 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
  }
  /* Check for encapsulated proto range - it is possible to be
  * any value for u_short range.
- * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */
+ * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS
+ */
  if (GET_BITMASK(EBT_VLAN_ENCAP)) {
  if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
  pr_debug("encap frame length %d is less than "
net/bridge/netfilter/ebtables.c

@@ -35,8 +35,7 @@
  "report to author: "format, ## args)
  /* #define BUGPRINT(format, args...) */
- /*
- * Each cpu has its own set of counters, so there is no need for write_lock in
+ /* Each cpu has its own set of counters, so there is no need for write_lock in
  * the softirq
  * For reading or updating the counters, the user context needs to
  * get a write_lock

@@ -237,7 +236,8 @@ unsigned int ebt_do_table(struct sk_buff *skb,
  (*(counter_base + i)).bcnt += skb->len;
  /* these should only watch: not modify, nor tell us
- what to do with the packet */
+ * what to do with the packet
+ */
  EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
  t = (struct ebt_entry_target *)

@@ -451,7 +451,8 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
  if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
  if (e->bitmask != 0) {
  /* we make userspace set this right,
- so there is no misunderstanding */
+ * so there is no misunderstanding
+ */
  BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
  "in distinguisher\n");
  return -EINVAL;

@@ -487,8 +488,7 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
  return 0;
  }
- /*
- * this one is very careful, as it is the first function
+ /* this one is very careful, as it is the first function
  * to parse the userspace data
  */
  static inline int

@@ -504,10 +504,12 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
  break;
  }
  /* beginning of a new chain
- if i == NF_BR_NUMHOOKS it must be a user defined chain */
+ * if i == NF_BR_NUMHOOKS it must be a user defined chain
+ */
  if (i != NF_BR_NUMHOOKS || !e->bitmask) {
  /* this checks if the previous chain has as many entries
- as it said it has */
+ * as it said it has
+ */
  if (*n != *cnt) {
  BUGPRINT("nentries does not equal the nr of entries "
  "in the chain\n");

@@ -556,8 +558,7 @@ struct ebt_cl_stack
  unsigned int hookmask;
  };
- /*
- * we need these positions to check that the jumps to a different part of the
+ /* We need these positions to check that the jumps to a different part of the
  * entries is a jump to the beginning of a new chain.
  */
  static inline int

@@ -687,7 +688,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
  break;
  }
  /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
- a base chain */
+ * a base chain
+ */
  if (i < NF_BR_NUMHOOKS)
  hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
  else {

@@ -758,8 +760,7 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
  return ret;
  }
- /*
- * checks for loops and sets the hook mask for udc
+ /* checks for loops and sets the hook mask for udc
  * the hook mask for udc tells us from which base chains the udc can be
  * accessed. This mask is a parameter to the check() functions of the extensions
  */

@@ -853,7 +854,8 @@ static int translate_table(struct net *net, const char *name,
  return -EINVAL;
  }
  /* make sure chains are ordered after each other in same order
- as their corresponding hooks */
+ * as their corresponding hooks
+ */
  for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
  if (!newinfo->hook_entry[j])
  continue;

@@ -868,7 +870,8 @@ static int translate_table(struct net *net, const char *name,
  i = 0; /* holds the expected nr. of entries for the chain */
  j = 0; /* holds the up to now counted entries for the chain */
  k = 0; /* holds the total nr. of entries, should equal
- newinfo->nentries afterwards */
+ * newinfo->nentries afterwards
+ */
  udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
  ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
  ebt_check_entry_size_and_hooks, newinfo,

@@ -888,10 +891,12 @@ static int translate_table(struct net *net, const char *name,
  }
  /* get the location of the udc, put them in an array
- while we're at it, allocate the chainstack */
+ * while we're at it, allocate the chainstack
+ */
  if (udc_cnt) {
  /* this will get free'd in do_replace()/ebt_register_table()
- if an error occurs */
+ * if an error occurs
+ */
  newinfo->chainstack =
  vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
  if (!newinfo->chainstack)

@@ -932,14 +937,15 @@ static int translate_table(struct net *net, const char *name,
  }
  /* we now know the following (along with E=mc²):
- - the nr of entries in each chain is right
- - the size of the allocated space is right
- - all valid hooks have a corresponding chain
- - there are no loops
- - wrong data can still be on the level of a single entry
- - could be there are jumps to places that are not the
- beginning of a chain. This can only occur in chains that
- are not accessible from any base chains, so we don't care. */
+ * - the nr of entries in each chain is right
+ * - the size of the allocated space is right
+ * - all valid hooks have a corresponding chain
+ * - there are no loops
+ * - wrong data can still be on the level of a single entry
+ * - could be there are jumps to places that are not the
+ * beginning of a chain. This can only occur in chains that
+ * are not accessible from any base chains, so we don't care.
+ */
  /* used to know what we need to clean up if something goes wrong */
  i = 0;

@@ -986,7 +992,8 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
  struct ebt_table *t;
  /* the user wants counters back
- the check on the size is done later, when we have the lock */
+ * the check on the size is done later, when we have the lock
+ */
  if (repl->num_counters) {
  unsigned long size = repl->num_counters * sizeof(*counterstmp);
  counterstmp = vmalloc(size);

@@ -1038,9 +1045,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
  write_unlock_bh(&t->lock);
  mutex_unlock(&ebt_mutex);
  /* so, a user can change the chains while having messed up her counter
- allocation. Only reason why this is done is because this way the lock
- is held only once, while this doesn't bring the kernel into a
- dangerous state. */
+ * allocation. Only reason why this is done is because this way the lock
+ * is held only once, while this doesn't bring the kernel into a
+ * dangerous state.
+ */
  if (repl->num_counters &&
  copy_to_user(repl->counters, counterstmp,
  repl->num_counters * sizeof(struct ebt_counter))) {

@@ -1348,7 +1356,8 @@ static inline int ebt_make_matchname(const struct ebt_entry_match *m,
  char name[EBT_FUNCTION_MAXNAMELEN] = {};
  /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
- long. Copy 29 bytes and fill remaining bytes with zeroes. */
+ * long. Copy 29 bytes and fill remaining bytes with zeroes.
+ */
  strlcpy(name, m->u.match->name, sizeof(name));
  if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
  return -EFAULT;

@@ -1595,8 +1604,7 @@ static int ebt_compat_entry_padsize(void)
  static int ebt_compat_match_offset(const struct xt_match *match,
  unsigned int userlen)
  {
- /*
- * ebt_among needs special handling. The kernel .matchsize is
+ /* ebt_among needs special handling. The kernel .matchsize is
  * set to -1 at registration time; at runtime an EBT_ALIGN()ed
  * value is expected.
  * Example: userspace sends 4500, ebt_among.c wants 4504.

@@ -1966,8 +1974,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
  return off + match_size;
  }
- /*
- * return size of all matches, watchers or target, including necessary
+ /* return size of all matches, watchers or target, including necessary
  * alignment and padding.
  */
  static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,

@@ -2070,8 +2077,7 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
  if (ret < 0)
  return ret;
  buf_start = (char *) entry;
- /*
- * 0: matches offset, always follows ebt_entry.
+ /* 0: matches offset, always follows ebt_entry.
  * 1: watchers offset, from ebt_entry structure
  * 2: target offset, from ebt_entry structure
  * 3: next ebt_entry offset, from ebt_entry structure

@@ -2115,8 +2121,7 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
  return 0;
  }
- /*
- * repl->entries_size is the size of the ebt_entry blob in userspace.
+ /* repl->entries_size is the size of the ebt_entry blob in userspace.
  * It might need more memory when copied to a 64 bit kernel in case
  * userspace is 32-bit. So, first task: find out how much memory is needed.
  *

@@ -2360,8 +2365,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
  break;
  case EBT_SO_GET_ENTRIES:
  case EBT_SO_GET_INIT_ENTRIES:
- /*
- * try real handler first in case of userland-side padding.
+ /* try real handler first in case of userland-side padding.
  * in case we are dealing with an 'ordinary' 32 bit binary
  * without 64bit compatibility padding, this will fail right
  * after copy_from_user when the *len argument is validated.