Commit 43312915 authored by Cong Wang's avatar Cong Wang Committed by Daniel Borkmann

skmsg: Get rid of unnecessary memset()

We always allocate skmsg with kzalloc(), so there is no need
to call memset(0) on it; the only thing we need from
sk_msg_init() is sg_init_marker(). So introduce a new helper
which is just kzalloc()+sg_init_marker(). This saves an
unnecessary memset(0) for skmsg on the fast path.
Signed-off-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20220615162014.89193-5-xiyou.wangcong@gmail.com
parent 57452d76
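For context, sk_msg_init() (defined in include/linux/skmsg.h) looks roughly like the sketch below, paraphrased from the kernel tree around this commit. Since kzalloc() already returns zeroed memory, the memset() is redundant immediately after allocation; only the sg_init_marker() step is needed, which is exactly what the new alloc_sk_msg() helper in the diff keeps:

	static inline void sk_msg_init(struct sk_msg *msg)
	{
		/* Zero the whole struct -- redundant right after kzalloc(). */
		memset(msg, 0, sizeof(*msg));
		/* Mark the last scatterlist entry as the end of the list. */
		sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	}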
@@ -497,23 +497,27 @@ bool sk_msg_is_readable(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
 
-static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
-						   struct sk_buff *skb)
+static struct sk_msg *alloc_sk_msg(void)
 {
 	struct sk_msg *msg;
 
-	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
+	if (unlikely(!msg))
 		return NULL;
+	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
+	return msg;
+}
 
-	if (!sk_rmem_schedule(sk, skb, skb->truesize))
+static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
+						   struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 		return NULL;
 
-	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
-	if (unlikely(!msg))
+	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 		return NULL;
 
-	sk_msg_init(msg);
-	return msg;
+	return alloc_sk_msg();
 }
 
 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
@@ -590,13 +594,12 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
 				     u32 off, u32 len)
 {
-	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
+	struct sk_msg *msg = alloc_sk_msg();
 	struct sock *sk = psock->sk;
 	int err;
 
 	if (unlikely(!msg))
 		return -EAGAIN;
-	sk_msg_init(msg);
 	skb_set_owner_r(skb, sk);
 	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 	if (err < 0)