Commit 1fbfdfaa authored by Kuniyuki Iwashima, committed by Jakub Kicinski

af_unix: Allocate struct unix_vertex for each inflight AF_UNIX fd.

We will replace the garbage collection algorithm for AF_UNIX.  In the
new algorithm, each inflight AF_UNIX socket is a vertex and its file
descriptor is an edge in a directed graph.

This patch introduces a new struct unix_vertex representing a vertex
in the graph and adds its pointer to struct unix_sock.

When we send a fd using an SCM_RIGHTS message, we allocate a struct
scm_fp_list and attach it to struct scm_cookie in scm_fp_copy().  Then,
we bump the refcount of each inflight fd's struct file and save the
pointers in scm_fp_list.fp.
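
For reference, the user-space side that triggers this path looks
roughly like the sketch below.  This is an illustration only, not part
of the patch; send_fd() is a hypothetical helper:

/* Illustration only: pass one fd over an AF_UNIX socket with SCM_RIGHTS.
 * This is the user-space trigger for scm_fp_copy().
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t send_fd(int sock, int fd_to_send)
{
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u = { 0 };
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;	/* fd-passing control message */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

	return sendmsg(sock, &msg, 0);	/* kernel copies the fd via scm_fp_copy() */
}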

After scm_fp_copy(), unix_attach_fds() inexplicably clones the
scm_fp_list of scm_cookie and attaches the clone to the skb.  (We will
remove this part after replacing the GC.)

Here, we add a new function, unix_prepare_fpl(), called from
unix_attach_fds() to preallocate a struct unix_vertex for each inflight
AF_UNIX fd and link each vertex to the skb's scm_fp_list.vertices.

Later, when sendmsg() succeeds, if the socket behind the inflight fd is
not yet inflight, we will assign the preallocated vertex to struct
unix_sock.vertex and link it to the global list
unix_unvisited_vertices under spin_lock(&unix_gc_lock).

If the socket is already inflight, we free the preallocated vertex.
Preallocating the vertices up front like this avoids taking the lock
unnecessarily when sendmsg() could still fail later.
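
A rough sketch of that later step is shown below.  This is an
illustration only: unix_attach_vertex() is a placeholder name, and the
real helpers (and the unix_unvisited_vertices list) arrive in the
follow-up patches:

/* Illustration only: once sendmsg() has succeeded, take one preallocated
 * vertex off the skb's scm_fp_list and attach it to the inflight socket,
 * unless the socket already has one.  Unused spare vertices stay on
 * fpl->vertices and are freed later by unix_free_vertices().
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <net/af_unix.h>
#include <net/scm.h>

static void unix_attach_vertex(struct unix_sock *u, struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;

	spin_lock(&unix_gc_lock);

	if (!u->vertex) {
		vertex = list_first_entry(&fpl->vertices,
					  struct unix_vertex, entry);
		list_del(&vertex->entry);

		u->vertex = vertex;
		/* unix_unvisited_vertices is the global list named above,
		 * added by a later patch.
		 */
		list_add(&vertex->entry, &unix_unvisited_vertices);
	}

	spin_unlock(&unix_gc_lock);
}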

In the following patch, we will similarly allocate another struct
per edge, which will finally be linked to the inflight socket's
unix_vertex.edges.

And then, we will count the number of edges as unix_vertex.out_degree.
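
For illustration, the per-edge structure described above might have
roughly this shape.  The field names below are assumptions based on
this description; the actual struct unix_edge is defined by the later
patches:

/* Illustration only: one allocation per inflight fd, linked into the
 * inflight socket's unix_vertex.edges; unix_vertex.out_degree counts
 * how many edges sit on that list.
 */
struct unix_edge {
	struct unix_sock *predecessor;	/* socket whose fd is inflight */
	struct unix_sock *successor;	/* socket receiving the fd     */
	struct list_head vertex_entry;	/* entry in unix_vertex.edges  */
};
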
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20240325202425.60930-2-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 50e2907e
@@ -22,9 +22,17 @@ extern unsigned int unix_tot_inflight;
void unix_inflight(struct user_struct *user, struct file *fp);
void unix_notinflight(struct user_struct *user, struct file *fp);

int unix_prepare_fpl(struct scm_fp_list *fpl);
void unix_destroy_fpl(struct scm_fp_list *fpl);

void unix_gc(void);
void wait_for_unix_gc(struct scm_fp_list *fpl);

struct unix_vertex {
	struct list_head edges;
	struct list_head entry;
	unsigned long out_degree;
};

struct sock *unix_peer_get(struct sock *sk);

#define UNIX_HASH_MOD (256 - 1)
@@ -62,6 +70,7 @@ struct unix_sock {
	struct path path;
	struct mutex iolock, bindlock;
	struct sock *peer;
	struct unix_vertex *vertex;
	struct list_head link;
	unsigned long inflight;
	spinlock_t lock;
@@ -27,6 +27,9 @@ struct scm_fp_list {
	short count;
	short count_unix;
	short max;
#ifdef CONFIG_UNIX
	struct list_head vertices;
#endif
	struct user_struct *user;
	struct file *fp[SCM_MAX_FD];
};
@@ -89,6 +89,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
		fpl->count_unix = 0;
		fpl->max = SCM_MAX_FD;
		fpl->user = NULL;
#if IS_ENABLED(CONFIG_UNIX)
		INIT_LIST_HEAD(&fpl->vertices);
#endif
	}
	fpp = &fpl->fp[fpl->count];
@@ -376,8 +379,12 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
	if (new_fpl) {
		for (i = 0; i < fpl->count; i++)
			get_file(fpl->fp[i]);
		new_fpl->max = new_fpl->count;
		new_fpl->user = get_uid(fpl->user);
#if IS_ENABLED(CONFIG_UNIX)
		INIT_LIST_HEAD(&new_fpl->vertices);
#endif
	}
	return new_fpl;
}
@@ -980,6 +980,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
	sk->sk_destruct = unix_sock_destructor;
	u = unix_sk(sk);
	u->inflight = 0;
	u->vertex = NULL;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
@@ -1805,6 +1806,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_inflight(scm->fp->user, scm->fp->fp[i]);

	if (unix_prepare_fpl(UNIXCB(skb).fp))
		return -ENOMEM;

	return 0;
}
@@ -1815,6 +1819,8 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	unix_destroy_fpl(scm->fp);

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}
@@ -101,6 +101,44 @@ struct unix_sock *unix_get_socket(struct file *filp)
	return NULL;
}

static void unix_free_vertices(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex, *next_vertex;

	list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
		list_del(&vertex->entry);
		kfree(vertex);
	}
}

int unix_prepare_fpl(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;
	int i;

	if (!fpl->count_unix)
		return 0;

	for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
		if (!vertex)
			goto err;

		list_add(&vertex->entry, &fpl->vertices);
	}

	return 0;

err:
	unix_free_vertices(fpl);

	return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
	unix_free_vertices(fpl);
}

DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);