Commit 15ec928a authored by Sagi Grimberg, committed by Christoph Hellwig

nvme-tcp: have queue prod/cons send list become a llist

The queue processing will now splice onto a queue-local list. This
should alleviate some contention on the send_list lock, and it also
prepares us for the next patch, where we look at these lists for a
network stack flag optimization.

Remove the queue lock, as it is no longer used.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
[hch: simplified a loop]
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent ca8f4bee
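Background for the diff below: <linux/llist.h> provides a lock-less singly-linked list in which any number of producers may add entries concurrently, while a single consumer detaches the entire chain with one atomic operation. The patch uses it in exactly that producer/consumer shape. A minimal sketch of the pattern, with illustrative names rather than the driver's own:

#include <linux/llist.h>
#include <linux/list.h>

struct item {
	struct llist_node	lentry;	/* lock-less side, shared by producers */
	struct list_head	entry;	/* private side, consumer only */
};

static LLIST_HEAD(inbox);		/* written concurrently from any CPU */
static LIST_HEAD(local);		/* touched only by the single consumer */

/* Producer: safe from any context, no lock. llist_add() returns true
 * iff the list was empty before this add, i.e. we were first. */
static bool produce(struct item *it)
{
	return llist_add(&it->lentry, &inbox);
}

/* Consumer: atomically detach everything, then work on the now-private
 * chain with no shared state and therefore no lock. */
static void consume(void)
{
	struct llist_node *node;
	struct item *it;

	for (node = llist_del_all(&inbox); node; node = node->next) {
		it = llist_entry(node, struct item, lentry);
		list_add(&it->entry, &local);
	}
}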
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -46,6 +46,7 @@ struct nvme_tcp_request {
 	u32			pdu_sent;
 	u16			ttag;
 	struct list_head	entry;
+	struct llist_node	lentry;
 	__le32			ddgst;
 
 	struct bio		*curr_bio;
@@ -75,8 +76,8 @@ struct nvme_tcp_queue {
 	struct work_struct	io_work;
 	int			io_cpu;
 
-	spinlock_t		lock;
 	struct mutex		send_mutex;
+	struct llist_head	req_list;
 	struct list_head	send_list;
 
 	/* recv state */
@@ -266,10 +267,8 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	struct nvme_tcp_queue *queue = req->queue;
 	bool empty;
 
-	spin_lock(&queue->lock);
-	empty = list_empty(&queue->send_list) && !queue->request;
-	list_add_tail(&req->entry, &queue->send_list);
-	spin_unlock(&queue->lock);
+	empty = llist_add(&req->lentry, &queue->req_list) &&
+		list_empty(&queue->send_list) && !queue->request;
 
 	/*
 	 * if we're the first on the send_list and we can try to send
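The hunk above is the producer side: llist_add() reports whether req_list was empty before the insertion, so the submitter learns "nothing was pending" from the cmpxchg it performs anyway, without taking a lock. The elided lines following the comment use empty to choose between an inline send attempt and kicking io_work; a hedged sketch of that shape (the exact conditions and the nvme_tcp_wq/nvme_tcp_try_send usage are assumptions about the surrounding driver code, not quoted from this patch):

	if (empty && mutex_trylock(&queue->send_mutex)) {
		/* queue was idle and the send path is free: send inline */
		nvme_tcp_try_send(queue);
		mutex_unlock(&queue->send_mutex);
	} else {
		/* otherwise defer to the per-queue io_work on its CPU */
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}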
@@ -285,18 +284,33 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	}
 }
 
+static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+{
+	struct nvme_tcp_request *req;
+	struct llist_node *node;
+
+	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
+		req = llist_entry(node, struct nvme_tcp_request, lentry);
+		list_add(&req->entry, &queue->send_list);
+	}
+}
+
 static inline struct nvme_tcp_request *
 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
 {
 	struct nvme_tcp_request *req;
 
-	spin_lock(&queue->lock);
 	req = list_first_entry_or_null(&queue->send_list,
 			struct nvme_tcp_request, entry);
-	if (req)
-		list_del(&req->entry);
-	spin_unlock(&queue->lock);
+	if (!req) {
+		nvme_tcp_process_req_list(queue);
+		req = list_first_entry_or_null(&queue->send_list,
+				struct nvme_tcp_request, entry);
+		if (unlikely(!req))
+			return NULL;
+	}
 
+	list_del(&req->entry);
 	return req;
 }
 
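One subtlety in nvme_tcp_process_req_list() above: producers push at the head, so req_list holds requests newest-first, and llist_del_all() hands back that LIFO chain unchanged. Walking the chain and head-inserting each request with list_add() reverses the order a second time, so send_list ends up oldest-first and nvme_tcp_fetch_request() dispatches in submission order. An equivalent, more explicit formulation would reverse the chain once and tail-insert; a sketch (not what the patch does, shown only to make the ordering visible):

static void nvme_tcp_process_req_list_alt(struct nvme_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvme_tcp_request *req;

	/* llist_reverse_order() turns the detached LIFO chain into
	 * submission (FIFO) order, so plain tail-insertion suffices */
	node = llist_reverse_order(llist_del_all(&queue->req_list));
	for (; node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add_tail(&req->entry, &queue->send_list);
	}
}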
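Dropping the spinlock is safe because send_list and the splice are only ever touched from the fetch path, which runs with queue->send_mutex held (both io_work and the direct-send attempt take it), so there is exactly one consumer at a time; req_list absorbs all cross-CPU concurrency. A sketch of a consumer loop under that assumption (nvme_tcp_drain_send_list is a hypothetical helper, not part of the driver):

static void nvme_tcp_drain_send_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	/* single consumer: serialized by send_mutex, mirroring the
	 * driver's io_work / direct-send paths */
	mutex_lock(&queue->send_mutex);
	while ((req = nvme_tcp_fetch_request(queue)) != NULL) {
		/* ... transmit req ... */
	}
	mutex_unlock(&queue->send_mutex);
}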
@@ -1344,8 +1358,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 	int ret, rcv_pdu_size;
 
 	queue->ctrl = ctrl;
+	init_llist_head(&queue->req_list);
 	INIT_LIST_HEAD(&queue->send_list);
-	spin_lock_init(&queue->lock);
 	mutex_init(&queue->send_mutex);
 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
 	queue->queue_size = queue_size;