Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 20209384 authored by Max Gurtovoy, committed by Christoph Hellwig
Browse files

nvmet-rdma: add an error flow for post_recv failures



Posting receive buffer operation can fail, thus we should make
sure to have an error flow during initialization phase. While
we're here, add a debug print in case of a failure.

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 2fc464e2
Loading
Loading
Loading
Loading
+21 −5
Original line number Original line Diff line number Diff line
@@ -436,14 +436,21 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
		struct nvmet_rdma_cmd *cmd)
{
{
	struct ib_recv_wr *bad_wr;
	struct ib_recv_wr *bad_wr;
	int ret;


	ib_dma_sync_single_for_device(ndev->device,
	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);
		DMA_FROM_DEVICE);


	if (ndev->srq)
	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
	else
		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}
}


static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
@@ -833,11 +840,16 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
	ndev->srq = srq;
	ndev->srq = srq;
	ndev->srq_size = srq_size;
	ndev->srq_size = srq_size;


	for (i = 0; i < srq_size; i++)
	for (i = 0; i < srq_size; i++) {
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
		ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
		if (ret)
			goto out_free_cmds;
	}


	return 0;
	return 0;


out_free_cmds:
	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
out_destroy_srq:
	ib_destroy_srq(srq);
	ib_destroy_srq(srq);
	return ret;
	return ret;
@@ -982,13 +994,17 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
	if (!ndev->srq) {
	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
		}
	}
	}


out:
out:
	return ret;
	return ret;


err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
err_destroy_cq:
	ib_free_cq(queue->cq);
	ib_free_cq(queue->cq);
	goto out;
	goto out;