Commit 40d0802b authored by Eric Dumazet, committed by David S. Miller

gro: __napi_gro_receive() optimizations

compare_ether_header() can have a special implementation on 64-bit arches
if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined.

__napi_gro_receive() and vlan_gro_common() can then avoid a conditional
branch when performing the device match.

On x86_64, __napi_gro_receive() now has 38 instructions instead of 53.

As gcc-4.4.3 still chooses not to inline it, add the inline keyword to this
performance-critical function.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 250ad8f5
include/linux/etherdevice.h
@@ -237,13 +237,29 @@ static inline bool is_etherdev_addr(const struct net_device *dev,
  * entry points.
  */
-static inline int compare_ether_header(const void *a, const void *b)
+static inline unsigned long compare_ether_header(const void *a, const void *b)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	unsigned long fold;
+
+	/*
+	 * We want to compare 14 bytes:
+	 *  [a0 ... a13] ^ [b0 ... b13]
+	 * Use two long XOR, ORed together, with an overlap of two bytes.
+	 *  [a0  a1  a2  a3  a4  a5  a6  a7 ] ^ [b0  b1  b2  b3  b4  b5  b6  b7 ] |
+	 *  [a6  a7  a8  a9  a10 a11 a12 a13] ^ [b6  b7  b8  b9  b10 b11 b12 b13]
+	 * This means the [a6 a7] ^ [b6 b7] part is done two times.
+	 */
+	fold = *(unsigned long *)a ^ *(unsigned long *)b;
+	fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
+	return fold;
+#else
 	u32 *a32 = (u32 *)((u8 *)a + 2);
 	u32 *b32 = (u32 *)((u8 *)b + 2);
 
 	return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
 	       (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
+#endif
 }
 
 #endif	/* _LINUX_ETHERDEVICE_H */
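
The comment in the new 64-bit branch describes the whole trick: two overlapping 8-byte loads cover the 14-byte Ethernet header, and the XOR/OR fold is zero exactly when the headers are equal. Below is a minimal stand-alone user-space sketch, not part of the patch and using a made-up helper name, that checks the fold against memcmp(). It assumes a 64-bit target where unaligned loads are acceptable (the condition CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS encodes) and kernel-style pointer casts (build with -fno-strict-aliasing, as the kernel does).

/* Stand-alone illustration, not kernel code: overlapping-XOR header fold. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static unsigned long fold_ether_header(const void *a, const void *b)
{
	unsigned long fold;

	/* Bytes 0-7 and 6-13: all 14 header bytes covered, bytes 6-7 twice. */
	fold = *(const unsigned long *)a ^ *(const unsigned long *)b;
	fold |= *(const unsigned long *)((const char *)a + 6) ^
		*(const unsigned long *)((const char *)b + 6);
	return fold;
}

int main(void)
{
	unsigned char h1[16] = { 0 }, h2[16] = { 0 };
	int i;

	for (i = 0; i < 14; i++)
		h1[i] = h2[i] = (unsigned char)(i * 17);

	/* Equal headers fold to zero, matching memcmp() == 0. */
	assert(fold_ether_header(h1, h2) == 0 && memcmp(h1, h2, 14) == 0);

	/* Corrupting any single header byte makes the fold non-zero. */
	for (i = 0; i < 14; i++) {
		h2[i] ^= 0x5a;
		assert((fold_ether_header(h1, h2) != 0) ==
		       (memcmp(h1, h2, 14) != 0));
		h2[i] ^= 0x5a;
	}
	printf("fold agrees with byte-wise comparison\n");
	return 0;
}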
net/8021q/vlan_core.c
@@ -105,9 +105,12 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 		goto drop;
 
 	for (p = napi->gro_list; p; p = p->next) {
-		NAPI_GRO_CB(p)->same_flow =
-			p->dev == skb->dev && !compare_ether_header(
-				skb_mac_header(p), skb_gro_mac_header(skb));
+		unsigned long diffs;
+
+		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+		diffs |= compare_ether_header(skb_mac_header(p),
+					      skb_gro_mac_header(skb));
+		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
...
net/core/dev.c
@@ -3169,16 +3169,18 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_gro_receive);
 
-static gro_result_t
+static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
 
 	for (p = napi->gro_list; p; p = p->next) {
-		NAPI_GRO_CB(p)->same_flow =
-			(p->dev == skb->dev) &&
-			!compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
+		unsigned long diffs;
+
+		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+		diffs |= compare_ether_header(skb_mac_header(p),
+					      skb_gro_mac_header(skb));
+		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
...
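
Both call-site hunks apply the changelog's second point: instead of evaluating "p->dev == skb->dev && headers match" with short-circuiting (a conditional branch), every source of difference is OR-accumulated into a single unsigned long and tested once. Below is a stand-alone sketch of the same idiom, with invented toy_* names rather than kernel API, under the same 64-bit/unaligned-access assumptions as the previous sketch.

/* Stand-alone illustration, not kernel code: branchless "same flow" test. */
#include <assert.h>
#include <stdint.h>

struct toy_dev { int id; };

struct toy_pkt {
	struct toy_dev *dev;
	unsigned char mac[14];	/* dst(6) + src(6) + ethertype(2) */
};

/* Same overlapping-XOR fold as above, restated so this sketch stands alone. */
static unsigned long hdr_fold(const void *a, const void *b)
{
	unsigned long f = *(const unsigned long *)a ^ *(const unsigned long *)b;

	return f | (*(const unsigned long *)((const char *)a + 6) ^
		    *(const unsigned long *)((const char *)b + 6));
}

static int toy_same_flow(const struct toy_pkt *a, const struct toy_pkt *b)
{
	unsigned long diffs;

	/* A device mismatch XORs to non-zero bits... */
	diffs = (unsigned long)(uintptr_t)a->dev ^ (unsigned long)(uintptr_t)b->dev;
	/* ...and so does any differing header byte, into the same accumulator. */
	diffs |= hdr_fold(a->mac, b->mac);

	/* One test of the accumulator replaces the old short-circuit "&&". */
	return diffs == 0;
}

int main(void)
{
	struct toy_dev d1 = { 1 }, d2 = { 2 };
	struct toy_pkt p1 = { &d1, "012345678901\x08" };
	struct toy_pkt p2 = p1;

	assert(toy_same_flow(&p1, &p2));	/* same device, same header */
	p2.dev = &d2;
	assert(!toy_same_flow(&p1, &p2));	/* different device */
	p2.dev = &d1;
	p2.mac[13] ^= 1;
	assert(!toy_same_flow(&p1, &p2));	/* different header byte */
	return 0;
}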