Commit cca1d815 authored by Eric Dumazet, committed by David S. Miller

net: fix HAVE_EFFICIENT_UNALIGNED_ACCESS typos

HAVE_EFFICIENT_UNALIGNED_ACCESS needs CONFIG_ prefix.

Also add a comment in nla_align_64bit() explaining why we have
to add padding if the current skb->data is aligned, as this
can certainly be confusing.
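As a standalone illustration of that arithmetic (a minimal userspace
sketch; the 0x1000 address below is a made-up example, only the 4-byte
nlattr header size and the 8-byte target come from the code being
patched):

	#include <stdint.h>
	#include <stdio.h>

	#define NLA_HDRLEN 4	/* struct nlattr: 2-byte nla_len + 2-byte nla_type */

	int main(void)
	{
		uintptr_t data = 0x1000;	/* hypothetical 8-byte aligned skb->data tail */

		/* Without padding the header ends at 0x1004, so nla_data()
		 * of the next attribute would NOT be 8-byte aligned. */
		printf("payload without pad: 0x%lx\n",
		       (unsigned long)(data + NLA_HDRLEN));

		/* A zero-payload NOP attribute consumes exactly one 4-byte
		 * header, so the next payload starts at 0x1008, aligned. */
		data += NLA_HDRLEN;
		printf("payload with pad:    0x%lx\n",
		       (unsigned long)(data + NLA_HDRLEN));
		return 0;
	}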

Fixes: 35c58459 ("net: Add helpers for 64-bit aligning netlink attributes.")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b84e9307
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1238,18 +1238,21 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
  * Conditionally emit a padding netlink attribute in order to make
  * the next attribute we emit have a 64-bit aligned nla_data() area.
  * This will only be done in architectures which do not have
- * HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
  *
  * Returns zero on success or a negative error code.
  */
 static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
 {
-#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (IS_ALIGNED((unsigned long)skb->data, 8)) {
-		struct nlattr *attr = nla_reserve(skb, padattr, 0);
-		if (!attr)
-			return -EMSGSIZE;
-	}
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/* The nlattr header is 4 bytes in size, that's why we test
+	 * if the skb->data _is_ aligned.  This NOP attribute, plus
+	 * nlattr header for next attribute, will make nla_data()
+	 * 8-byte aligned.
+	 */
+	if (IS_ALIGNED((unsigned long)skb->data, 8) &&
+	    !nla_reserve(skb, padattr, 0))
+		return -EMSGSIZE;
 #endif
 	return 0;
 }
@@ -1261,7 +1264,7 @@ static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
 static inline int nla_total_size_64bit(int payload)
 {
 	return NLA_ALIGN(nla_attr_size(payload))
-#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 		+ NLA_ALIGN(nla_attr_size(0))
 #endif
 		;
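For context, a sketch of how a hypothetical caller would pair the two
helpers when emitting a 64-bit attribute (MYATTR_STAT64 and MYATTR_PAD
are placeholder attribute types, not part of this patch; nla_reserve(),
nla_data() and the size helper are the existing netlink API):

	#include <net/netlink.h>

	/* The message size budget would use nla_total_size_64bit(sizeof(u64)),
	 * which accounts for the possible NOP pad on strict-alignment arches. */
	static int fill_stat64(struct sk_buff *skb, u64 stat)
	{
		struct nlattr *attr;

		/* Emit the NOP pad first if skb->data is currently 8-byte
		 * aligned, so the u64 payload below lands on an 8-byte boundary. */
		if (nla_align_64bit(skb, MYATTR_PAD) < 0)
			return -EMSGSIZE;

		attr = nla_reserve(skb, MYATTR_STAT64, sizeof(u64));
		if (!attr)
			return -EMSGSIZE;
		memcpy(nla_data(attr), &stat, sizeof(u64));
		return 0;
	}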