Commit 0568d1a2 authored by Thomas Graf

[NET] Reorder struct sock

Reorders struct sock to avoid padding and shrinks the following
fields to more appropriate sizes, saving 12 bytes, and more on
64-bit architectures:

sk_shutdown  : unsigned char -> 2 bits
sk_no_check  : char -> 2 bits
sk_userlocks : unsigned char -> 4 bits
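
The general technique, for readers outside the kernel tree: group small
members so they share the bytes the compiler would otherwise spend on
padding before aligned fields. A minimal stand-alone sketch (field names
merely borrowed from struct sock, not kernel code; sizes assume a typical
ABI where int requires 4-byte alignment):

/* Illustration only -- compile and run to see the saving. */
#include <stdio.h>

struct before {
	unsigned char  shutdown;   /* 1 byte, then 3 bytes of padding  */
	int            rcvbuf;     /* must start on a 4-byte boundary  */
	char           no_check;   /* 1 byte, then 3 more padding bytes */
	int            route_caps;
};                                 /* typically 16 bytes */

struct after {
	unsigned char  shutdown  : 2,   /* three fields packed       */
	               no_check  : 2,   /* into a single byte        */
	               userlocks : 4;
	unsigned char  protocol;        /* byte + short fill the word */
	unsigned short type;            /* before the first int       */
	int            rcvbuf;
	int            route_caps;
};                                 /* typically 12 bytes */

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct before));
	printf("after:  %zu bytes\n", sizeof(struct after));
	return 0;
}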
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 62c06801
include/net/sock.h

@@ -184,28 +184,30 @@ struct sock {
 #define sk_node			__sk_common.skc_node
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_refcnt		__sk_common.skc_refcnt
-	unsigned char		sk_shutdown;
-	unsigned char		sk_userlocks;
-	socket_lock_t		sk_lock;
+	unsigned char		sk_shutdown : 2,
+				sk_no_check : 2,
+				sk_userlocks : 4;
+	unsigned char		sk_protocol;
+	unsigned short		sk_type;
 	int			sk_rcvbuf;
+	socket_lock_t		sk_lock;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
-	rwlock_t		sk_dst_lock;
 	struct xfrm_policy	*sk_policy[2];
+	rwlock_t		sk_dst_lock;
 	atomic_t		sk_rmem_alloc;
-	struct sk_buff_head	sk_receive_queue;
 	atomic_t		sk_wmem_alloc;
-	struct sk_buff_head	sk_write_queue;
 	atomic_t		sk_omem_alloc;
+	struct sk_buff_head	sk_receive_queue;
+	struct sk_buff_head	sk_write_queue;
 	int			sk_wmem_queued;
 	int			sk_forward_alloc;
 	unsigned int		sk_allocation;
 	int			sk_sndbuf;
-	unsigned long		sk_flags;
-	char			sk_no_check;
 	int			sk_route_caps;
-	unsigned long		sk_lingertime;
 	int			sk_hashent;
+	unsigned long		sk_flags;
+	unsigned long		sk_lingertime;
 	/*
 	 * The backlog queue is special, it is always used with
 	 * the per-socket spinlock held and requires low latency
@@ -215,16 +217,14 @@ struct sock {
 		struct sk_buff	*head;
 		struct sk_buff	*tail;
 	} sk_backlog;
-	rwlock_t		sk_callback_lock;
 	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot;
+	rwlock_t		sk_callback_lock;
 	int			sk_err,
 				sk_err_soft;
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
-	unsigned short		sk_type;
-	unsigned char		sk_protocol;
 	struct ucred		sk_peercred;
 	int			sk_rcvlowat;
 	long			sk_rcvtimeo;
@@ -238,11 +238,10 @@ struct sock {
 	void			*sk_user_data;
 	struct module		*sk_owner;
 	struct page		*sk_sndmsg_page;
-	__u32			sk_sndmsg_off;
 	struct sk_buff		*sk_send_head;
+	__u32			sk_sndmsg_off;
 	int			sk_write_pending;
 	void			*sk_security;
-	/* three bytes hole, try to pack */
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk, int bytes);
 	void			(*sk_write_space)(struct sock *sk);
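
The shrunken widths are safe because each field only ever holds a handful
of flag bits. As an illustration (not part of this commit), the relevant
flag values, as defined alongside struct sock in include/net/sock.h and in
include/linux/udp.h:

#define RCV_SHUTDOWN		1
#define SEND_SHUTDOWN		2
#define SHUTDOWN_MASK		3	/* maximum value 3 fits in sk_shutdown : 2  */

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8	/* OR of all four is 15 -> sk_userlocks : 4 */

/* sk_no_check stores only UDP_CSUM_DEFAULT (0), UDP_CSUM_NOXMIT (1)
 * or UDP_CSUM_NORCV (2), so 2 bits suffice there as well. */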