Commit 4f8cce2d authored by Hannes Reinecke, committed by Keith Busch

nvmet-tcp: make nvmet_tcp_alloc_queue() a void function

The return value from nvmet_tcp_alloc_queue() is only used to
figure out whether sock_release() needs to be called. So this patch
moves sock_release() into nvmet_tcp_alloc_queue() and makes it
a void function.
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Nitesh Shetty <nj.shetty@samsung.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent 3f123494
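As context for the diff below: the patch applies a common ownership idiom in which the allocation routine releases the passed-in socket on every failure path, so it can return void and its caller no longer needs any error handling. The following is a minimal, userspace-only C sketch of that control flow; the names (demo_queue, demo_alloc_queue) are hypothetical and a FILE * stands in for the accepted socket, so this is an illustration of the pattern, not actual nvmet code.

/*
 * Minimal userspace sketch of the ownership pattern the patch applies.
 * All names here are hypothetical; a FILE * stands in for the socket.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_queue {
	FILE *resource;
	int idx;
};

/*
 * Like the patched nvmet_tcp_alloc_queue(): the callee owns the passed-in
 * resource and releases it on every failure path, so it can return void
 * and the caller has nothing to check or clean up.
 */
static void demo_alloc_queue(FILE *resource)
{
	struct demo_queue *queue;
	int ret;

	queue = calloc(1, sizeof(*queue));
	if (!queue) {
		ret = -1;
		goto out_release;
	}

	queue->resource = resource;
	queue->idx = 0;
	/*
	 * Further setup would go here. The queue intentionally stays
	 * allocated on success; in the kernel it is torn down later by
	 * the release work, not by the caller.
	 */
	printf("queue %d ready\n", queue->idx);
	return;				/* success: caller checks nothing */

out_release:
	fprintf(stderr, "failed to allocate queue, error %d\n", ret);
	fclose(resource);		/* callee, not caller, releases it */
}

int main(void)
{
	FILE *resource = tmpfile();	/* stands in for the accepted socket */

	if (!resource)
		return 1;

	/* Caller mirrors the new nvmet_tcp_accept_work(): no error handling. */
	demo_alloc_queue(resource);
	return 0;
}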
@@ -1621,15 +1621,17 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
 	return ret;
 }
 
-static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 		struct socket *newsock)
 {
 	struct nvmet_tcp_queue *queue;
 	int ret;
 
 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
-	if (!queue)
-		return -ENOMEM;
+	if (!queue) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
 
 	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
 	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
@@ -1666,7 +1668,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 	if (ret)
 		goto out_destroy_sq;
 
-	return 0;
+	return;
 out_destroy_sq:
 	mutex_lock(&nvmet_tcp_queue_mutex);
 	list_del_init(&queue->queue_list);
@@ -1678,7 +1680,9 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
 out_free_queue:
 	kfree(queue);
-	return ret;
+out_release:
+	pr_err("failed to allocate queue, error %d\n", ret);
+	sock_release(newsock);
 }
 
 static void nvmet_tcp_accept_work(struct work_struct *w)
@@ -1695,11 +1699,7 @@ static void nvmet_tcp_accept_work(struct work_struct *w)
 				pr_warn("failed to accept err=%d\n", ret);
 			return;
 		}
 
-		ret = nvmet_tcp_alloc_queue(port, newsock);
-		if (ret) {
-			pr_err("failed to allocate queue\n");
-			sock_release(newsock);
-		}
+		nvmet_tcp_alloc_queue(port, newsock);
 	}
 }