Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a41081aa authored by Jianxin Xiong, committed by Doug Ledford
Browse files

IB/rdmavt: Add support for ib_map_mr_sg



This implements the device specific function needed by the verbs
API function ib_map_mr_sg().

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jianxin Xiong <jianxin.xiong@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5fd2b562
Loading
Loading
Loading
Loading
+51 −0
Original line number Diff line number Diff line
@@ -479,6 +479,57 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
	return &mr->ibmr;
}

/**
 * rvt_set_page - page assignment callback passed to ib_sg_to_pages
 * @ibmr: memory region being populated
 * @addr: dma address of the page to record
 *
 * Appends one page-sized segment to the region's segment array and
 * grows mr.length by one page. The first page recorded also becomes
 * the region's user_base/iova.
 *
 * Return: 0 on success, -ENOMEM when all segment slots are consumed
 */
static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	u32 page_size = 1 << mr->mr.page_shift;
	/* length is always a whole number of pages here, so this is the
	 * count of pages recorded so far.
	 */
	u32 nmapped = mr->mr.length >> mr->mr.page_shift;
	int map_idx, seg_idx;

	/* No free segment slot left for another page. */
	if (unlikely(nmapped == mr->mr.max_segs))
		return -ENOMEM;

	/* The very first page anchors the region's base addresses. */
	if (!mr->mr.length) {
		mr->mr.user_base = addr;
		mr->mr.iova = addr;
	}

	/* Segments are stored as map[] chunks of RVT_SEGSZ entries. */
	map_idx = nmapped / RVT_SEGSZ;
	seg_idx = nmapped % RVT_SEGSZ;
	mr->mr.map[map_idx]->segs[seg_idx].vaddr = (void *)addr;
	mr->mr.map[map_idx]->segs[seg_idx].length = page_size;
	mr->mr.length += page_size;

	return 0;
}

/**
 * rvt_map_mr_sg - map a scatterlist into the memory region
 * @ibmr: memory region to populate
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in @sg
 * @sg_offset: offset in bytes into @sg
 *
 * Resets the region's bookkeeping, then lets the core helper
 * ib_sg_to_pages() walk the scatterlist and hand each page to
 * rvt_set_page() for recording.
 *
 * Return: number of sg elements mapped to the memory region
 */
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset)
{
	struct rvt_mr *mr = to_imr(ibmr);

	/* Start from a clean slate; rvt_set_page accumulates length. */
	mr->mr.page_shift = PAGE_SHIFT;
	mr->mr.length = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
}

/**
 * rvt_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
+2 −0
Original line number Diff line number Diff line
@@ -82,6 +82,8 @@ int rvt_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset);
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+7 −0
Original line number Diff line number Diff line
@@ -370,6 +370,7 @@ enum {
	REG_USER_MR,
	DEREG_MR,
	ALLOC_MR,
	MAP_MR_SG,
	ALLOC_FMR,
	MAP_PHYS_FMR,
	UNMAP_FMR,
@@ -634,6 +635,12 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
				      rvt_alloc_mr);
		break;

	case MAP_MR_SG:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_mr_sg),
				      rvt_map_mr_sg);
		break;

	case MAP_PHYS_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_phys_fmr),