Commit dea31328 authored by Jens Axboe

Merge tag 'nvme-6.1-2022-10-27' of git://git.infradead.org/nvme into block-6.1

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 6.1

 - make the multipath dma alignment match the non-multipath one
   (Keith Busch)
 - fix a bogus use of sg_init_marker() (Nam Cao)
 - fix circular locking in nvme-tcp (Sagi Grimberg)"

* tag 'nvme-6.1-2022-10-27' of git://git.infradead.org/nvme:
  nvme-multipath: set queue dma alignment to 3
  nvme-tcp: fix possible circular locking when deleting a controller under memory pressure
  nvme-tcp: replace sg_init_marker() with sg_init_table()
parents 2d87d455 fe8714b0
@@ -516,6 +516,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	/* set to a default value of 512 until the disk is validated */
 	blk_queue_logical_block_size(head->disk->queue, 512);
 	blk_set_stacking_limits(&head->disk->queue->limits);
+	blk_queue_dma_alignment(head->disk->queue, 3);
 
 	/* we need to propagate up the VMC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
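Note on the change above (editorial, not part of the patch): blk_queue_dma_alignment() takes an alignment mask, so passing 3 requires buffers to be dword (4-byte) aligned, matching what the core NVMe driver already sets on the per-path request queues. A minimal sketch of the check the block layer roughly applies to user buffers; buffer_is_dma_aligned() is a hypothetical helper, not a kernel API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only: a buffer passes when both its address and its length are
 * multiples of (mask + 1); with mask == 3 that means 4-byte alignment.
 * Buffers that violate the mask roughly end up being copied through a
 * bounce buffer instead of being mapped directly.
 */
static bool buffer_is_dma_aligned(uintptr_t addr, size_t len, unsigned long mask)
{
	return ((addr | len) & mask) == 0;
}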
@@ -387,7 +387,7 @@ static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
 {
 	struct scatterlist sg;
 
-	sg_init_marker(&sg, 1);
+	sg_init_table(&sg, 1);
 	sg_set_page(&sg, page, len, off);
 	ahash_request_set_crypt(hash, &sg, NULL, len);
 	crypto_ahash_update(hash);
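Note on the change above (editorial, not part of the patch): the scatterlist here lives on the stack and is never zeroed, so sg_init_marker(), which only terminates the list, leaves the rest of the entry as stack garbage; sg_init_table() clears the entries first. Paraphrased from the kernel's scatterlist helpers for context:

/* Roughly what include/linux/scatterlist.h and lib/scatterlist.c provide:
 * sg_init_table() zeroes every entry and then delegates to
 * sg_init_marker(), which only sets the end marker on the last entry.
 */
static inline void sg_init_marker(struct scatterlist *sgl, unsigned int nents)
{
	sg_mark_end(&sgl[nents - 1]);
}

void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}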
@@ -1141,6 +1141,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 {
 	struct nvme_tcp_request *req;
+	unsigned int noreclaim_flag;
 	int ret = 1;
 
 	if (!queue->request) {
@@ -1150,12 +1151,13 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 	}
 	req = queue->request;
 
+	noreclaim_flag = memalloc_noreclaim_save();
 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
 		ret = nvme_tcp_try_send_cmd_pdu(req);
 		if (ret <= 0)
 			goto done;
 		if (!nvme_tcp_has_inline_data(req))
-			return ret;
+			goto out;
 	}
 
 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
@@ -1181,6 +1183,8 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 		nvme_tcp_fail_request(queue->request);
 		nvme_tcp_done_send_req(queue);
 	}
+out:
+	memalloc_noreclaim_restore(noreclaim_flag);
 	return ret;
 }
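Note on the changes above (editorial, not part of the patch): sending on the socket may allocate memory (e.g. skbs), and under memory pressure such an allocation can enter direct reclaim, which in turn may try to write out dirty pages through the very nvme-tcp controller being torn down, producing the circular locking report. memalloc_noreclaim_save()/memalloc_noreclaim_restore() scope PF_MEMALLOC over the send path so those allocations do not recurse into reclaim; the same guard is applied around sock_release() in nvme_tcp_free_queue() below. A minimal sketch of the save/restore pattern; do_send() is a hypothetical placeholder, not a kernel API:

#include <linux/sched/mm.h>

static int do_send(void)
{
	return 0;	/* hypothetical stand-in for the allocating socket send */
}

static int send_without_reclaim(void)
{
	unsigned int noreclaim_flag;
	int ret;

	noreclaim_flag = memalloc_noreclaim_save();	/* sets PF_MEMALLOC for this task */
	ret = do_send();				/* allocations here will not enter direct reclaim */
	memalloc_noreclaim_restore(noreclaim_flag);	/* must run on every exit path, hence the out: label */
	return ret;
}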
@@ -1296,6 +1300,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 	struct page *page;
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+	unsigned int noreclaim_flag;
 
 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
 		return;
@@ -1308,7 +1313,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
 		queue->pf_cache.va = NULL;
 	}
+
+	noreclaim_flag = memalloc_noreclaim_save();
 	sock_release(queue->sock);
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	kfree(queue->pdu);
 	mutex_destroy(&queue->send_mutex);
 	mutex_destroy(&queue->queue_lock);