author     Jens Axboe <axboe@kernel.dk>    2020-03-20 19:14:16 -0600
committer  Jens Axboe <axboe@kernel.dk>    2020-03-20 19:14:16 -0600
commit     83166ac82b53655b63d3a59f6fe44f20511e96f2
tree       23f26320e4de7a05ab792ee080227788b9cef04f
parent     b53df2e7442c73a932fb74228147fb946e531585
parent     98fd5c723730f560e5bea919a64ac5b83d45eb72
Merge branch 'nvme-5.6-rc6' of git://git.infradead.org/nvme into block-5.6
Pull NVMe fixes from Keith:

"Two late nvme fabrics fixes for 5.6: a double free with the rdma
 transport, and a regression fix for tcp; please pull."

* 'nvme-5.6-rc6' of git://git.infradead.org/nvme:
  nvmet-tcp: set MSG_MORE only if we actually have more to send
  nvme-rdma: Avoid double freeing of async event data
 drivers/nvme/host/rdma.c  |  8 ++++++--
 drivers/nvme/target/tcp.c | 12 +++++++++---
 2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3e85c5cacefd..0fe08c4dfd2f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -850,9 +850,11 @@ out_free_tagset:
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-		sizeof(struct nvme_command), DMA_TO_DEVICE);
-	ctrl->async_event_sqe.data = NULL;
+	if (ctrl->async_event_sqe.data) {
+		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+			sizeof(struct nvme_command), DMA_TO_DEVICE);
+		ctrl->async_event_sqe.data = NULL;
+	}
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;
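
The rdma.c hunk above guards the async event SQE teardown so the error path cannot free the same buffer twice: the free runs only while the data pointer is still set, and the pointer is cleared right after. Below is a minimal standalone C sketch of that guard-and-clear idiom; the demo_qe type and demo_qe_free() helper are illustrative stand-ins, not the kernel's nvme_rdma_qe API.

	#include <stdlib.h>
	#include <string.h>

	/* Illustrative structure standing in for the async event SQE;
	 * not the kernel's nvme_rdma_qe. */
	struct demo_qe {
		void *data;
		size_t len;
	};

	/* Free the buffer only if it is still owned, then clear the
	 * pointer so a second call on another error path is a no-op. */
	static void demo_qe_free(struct demo_qe *qe)
	{
		if (qe->data) {
			free(qe->data);
			qe->data = NULL;
		}
	}

	int main(void)
	{
		struct demo_qe qe = { .data = malloc(64), .len = 64 };

		if (qe.data)
			memset(qe.data, 0, qe.len);

		demo_qe_free(&qe);	/* frees and clears the pointer */
		demo_qe_free(&qe);	/* pointer is NULL, nothing to free */
		return 0;
	}
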
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index af674fc0bb1e..5bb5342b8d0c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 	return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	int ret;
@@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
 	while (cmd->cur_sg) {
 		struct page *page = sg_page(cmd->cur_sg);
 		u32 left = cmd->cur_sg->length - cmd->offset;
+		int flags = MSG_DONTWAIT;
+
+		if ((!last_in_batch && cmd->queue->send_list_len) ||
+		    cmd->wbytes_done + left < cmd->req.transfer_len ||
+		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+			flags |= MSG_MORE;
 
 		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-					left, MSG_DONTWAIT | MSG_MORE);
+					left, flags);
 		if (ret <= 0)
 			return ret;
 
@@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	}
 
 	if (cmd->state == NVMET_TCP_SEND_DATA) {
-		ret = nvmet_try_send_data(cmd);
+		ret = nvmet_try_send_data(cmd, last_in_batch);
 		if (ret <= 0)
 			goto done_send;
 	}
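
The tcp.c change makes MSG_MORE conditional: it is set only when this command is not the last in the batch, more payload bytes remain, or a data digest or completion still has to follow, so the final segment is no longer held back waiting for data that never arrives. The fragment below is a hedged userspace sketch of the same idea using send(2) rather than the kernel's kernel_sendpage() path; send_chunked() and its parameters are illustrative, not part of the nvmet code.

	#include <sys/types.h>
	#include <sys/socket.h>

	/*
	 * Send a buffer in fixed-size chunks. MSG_MORE is set only while
	 * more bytes remain; hinting "more data follows" on the last chunk
	 * would leave it queued until something else flushes the socket.
	 */
	static ssize_t send_chunked(int fd, const char *buf, size_t len,
				    size_t chunk)
	{
		size_t off = 0;

		while (off < len) {
			size_t n = len - off < chunk ? len - off : chunk;
			int flags = MSG_DONTWAIT;

			if (off + n < len)	/* more chunks still to come */
				flags |= MSG_MORE;

			ssize_t ret = send(fd, buf + off, n, flags);
			if (ret <= 0)
				return ret;
			off += ret;
		}
		return (ssize_t)off;
	}
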