
Commit 94423a8f authored by Max Gurtovoy, committed by Christoph Hellwig

nvme-rdma: fix error flow during mapping request data



After DMA mapping the sgl, we map the sgl to an NVMe SGL descriptor. If
that last mapping fails, we never DMA unmap the sgl.

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 2796b569
+24 −7
@@ -1189,21 +1189,38 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
 		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (unlikely(count <= 0)) {
-		sg_free_table_chained(&req->sg_table, true);
-		return -EIO;
+		ret = -EIO;
+		goto out_free_table;
 	}
 
 	if (count == 1) {
 		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
 		    blk_rq_payload_bytes(rq) <=
-				nvme_rdma_inline_data_size(queue))
-			return nvme_rdma_map_sg_inline(queue, req, c);
+				nvme_rdma_inline_data_size(queue)) {
+			ret = nvme_rdma_map_sg_inline(queue, req, c);
+			goto out;
+		}
 
-		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
-			return nvme_rdma_map_sg_single(queue, req, c);
+		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+			ret = nvme_rdma_map_sg_single(queue, req, c);
+			goto out;
+		}
 	}
 
-	return nvme_rdma_map_sg_fr(queue, req, c, count);
+	ret = nvme_rdma_map_sg_fr(queue, req, c, count);
+out:
+	if (unlikely(ret))
+		goto out_unmap_sg;
+
+	return 0;
+
+out_unmap_sg:
+	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
+			req->nents, rq_data_dir(rq) ==
+			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+out_free_table:
+	sg_free_table_chained(&req->sg_table, true);
+	return ret;
 }
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
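
The fix replaces the early returns with a single goto-based unwind: each mapping helper stores its status in ret and jumps to out, and on failure control falls through out_unmap_sg (which releases the DMA mapping) into out_free_table (which releases the chained sg table), so a late mapping failure now undoes every earlier step. Below is a minimal userspace sketch of that unwind pattern, assuming nothing beyond the C standard library; the struct and helper names are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the sg table and the DMA mapping of the real code. */
struct req {
	void *sg_table;
	void *dma_map;
};

/*
 * Hypothetical final step that can fail, playing the role of building
 * the NVMe SGL descriptor; forced to fail here to exercise the unwind.
 */
static int build_sgl_descriptor(struct req *r)
{
	(void)r;
	return -1;
}

static int map_request_data(struct req *r)
{
	int ret;

	r->sg_table = malloc(64);		/* step 1: allocate sg table */
	if (!r->sg_table)
		return -1;

	r->dma_map = malloc(64);		/* step 2: "DMA map" the sgl */
	if (!r->dma_map) {
		ret = -1;
		goto out_free_table;
	}

	ret = build_sgl_descriptor(r);		/* step 3: may fail */
	if (ret)
		goto out_unmap_sg;		/* the unwind the bug skipped */

	return 0;				/* success: caller tears down later */

out_unmap_sg:
	free(r->dma_map);			/* undo step 2 */
out_free_table:
	free(r->sg_table);			/* undo step 1 */
	return ret;
}

int main(void)
{
	struct req r = { 0 };

	if (map_request_data(&r))
		fprintf(stderr, "mapping failed; all completed steps unwound\n");
	return 0;
}

Because the labels are ordered inversely to the setup steps and fall through, a failure at any stage releases exactly the resources acquired before it, and nothing more.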