
Commit 68c6e9cd authored by Bart Van Assche, committed by Jens Axboe

nvmet/rdma: Use sgl_alloc() and sgl_free()

Use the sgl_alloc() and sgl_free() functions instead of open coding
these functions.
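
For context: sgl_alloc() and sgl_free() live in lib/sgl_alloc.c and are built only when CONFIG_SGL_ALLOC is set, which is why the Kconfig hunk below adds "select SGL_ALLOC". The sketch below shows the call pattern this patch adopts; it is illustrative only, and the helper names example_map_buffer()/example_unmap_buffer() are not part of the patch.

#include <linux/gfp.h>
#include <linux/scatterlist.h>	/* sgl_alloc() / sgl_free() */

/*
 * Illustrative helper: allocate a page-backed scatterlist covering
 * 'len' bytes. sgl_alloc() returns the list and stores the number of
 * entries in *sg_cnt; on failure it returns NULL (which the nvmet
 * code maps to NVME_SC_INTERNAL).
 */
static int example_map_buffer(u32 len, struct scatterlist **sg,
			      unsigned int *sg_cnt)
{
	*sg = sgl_alloc(len, GFP_KERNEL, sg_cnt);
	if (!*sg)
		return -ENOMEM;

	/* ... hand *sg and *sg_cnt to e.g. rdma_rw_ctx_init() ... */
	return 0;
}

static void example_unmap_buffer(struct scatterlist *sg)
{
	/* Frees every page in the list, then the scatterlist itself. */
	sgl_free(sg);
}

Note that sgl_free() frees the backing pages unconditionally, which is why the inline_sg guard in nvmet_rdma_release_rsp() is kept in the diff below.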

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4442b56f
drivers/nvme/target/Kconfig  +1 −0
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA
 	tristate "NVMe over Fabrics RDMA target support"
 	depends on INFINIBAND
 	depends on NVME_TARGET
+	select SGL_ALLOC
 	help
 	  This enables the NVMe RDMA target support, which allows exporting NVMe
 	  devices over RDMA.
drivers/nvme/target/rdma.c  +4 −59
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
 }
 
-static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
-{
-	struct scatterlist *sg;
-	int count;
-
-	if (!sgl || !nents)
-		return;
-
-	for_each_sg(sgl, sg, nents, count)
-		__free_page(sg_page(sg));
-	kfree(sgl);
-}
-
-static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
-		u32 length)
-{
-	struct scatterlist *sg;
-	struct page *page;
-	unsigned int nent;
-	int i = 0;
-
-	nent = DIV_ROUND_UP(length, PAGE_SIZE);
-	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
-	if (!sg)
-		goto out;
-
-	sg_init_table(sg, nent);
-
-	while (length) {
-		u32 page_len = min_t(u32, length, PAGE_SIZE);
-
-		page = alloc_page(GFP_KERNEL);
-		if (!page)
-			goto out_free_pages;
-
-		sg_set_page(&sg[i], page, page_len, 0);
-		length -= page_len;
-		i++;
-	}
-	*sgl = sg;
-	*nents = nent;
-	return 0;
-
-out_free_pages:
-	while (i > 0) {
-		i--;
-		__free_page(sg_page(&sg[i]));
-	}
-	kfree(sg);
-out:
-	return NVME_SC_INTERNAL;
-}
-
 static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
 			struct nvmet_rdma_cmd *c, bool admin)
 {
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 	}
 
 	if (rsp->req.sg != &rsp->cmd->inline_sg)
-		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+		sgl_free(rsp->req.sg);
 
 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
 		nvmet_rdma_process_wr_wait_list(queue);
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	u32 len = get_unaligned_le24(sgl->length);
 	u32 key = get_unaligned_le32(sgl->key);
 	int ret;
-	u16 status;
 
 	/* no data command? */
 	if (!len)
 		return 0;
 
-	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-			len);
-	if (status)
-		return status;
+	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+	if (!rsp->req.sg)
+		return NVME_SC_INTERNAL;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,