
Commit 2f5ff264 authored by Eli Cohen, committed by Leon Romanovsky

mlx5: Fix naming convention with respect to UARs



This establishes a solid naming convention for UARs. A UAR (User Access
Region) can have a size identical to a system page or can be a fixed
4KB, depending on a value queried from firmware. Each UAR always has 4
blue flame registers, which are used to post doorbells to send queues.
In addition, a UAR has a section used for posting doorbells to CQs or
EQs. This patch changes the names to reflect these conventions.
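For orientation, a minimal sketch (not part of the patch) of the layout the new names describe. The constants and the index arithmetic come from the hunks below (bfregn_to_uar_index() and the fast-path bitmap setup in mlx5_ib_alloc_ucontext()); the two helper names here are hypothetical, and the value of MLX5_NON_FP_BFREGS_PER_UAR is inferred from the bitmap loop that reserves two of every four bfregs.

	/* Each UAR carries MLX5_BFREGS_PER_UAR (4) blue flame registers.
	 * Within each UAR, bfregs 2 and 3 are reserved for the fast path,
	 * leaving MLX5_NON_FP_BFREGS_PER_UAR regular send-doorbell bfregs.
	 */
	#define MLX5_BFREGS_PER_UAR		4
	#define MLX5_NON_FP_BFREGS_PER_UAR	2	/* inferred: 4 minus the 2 fast-path slots */

	/* hypothetical helper, mirroring bfregn_to_uar_index() below */
	static int bfreg_to_uar(int bfregn)
	{
		return bfregn / MLX5_BFREGS_PER_UAR;	/* index of the owning UAR */
	}

	/* hypothetical helper, mirroring the fast-path bitmap setup below */
	static int bfreg_is_fast_path(int bfregn)
	{
		return (bfregn & 3) == 2 || (bfregn & 3) == 3;
	}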

Signed-off-by: Eli Cohen <eli@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent f4044dac
drivers/infiniband/hw/mlx5/cq.c  +3 −3
@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
-	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+	void __iomem *uar_page = mdev->priv.bfregi.uars[0].map;
	unsigned long irq_flags;
	int ret = 0;

@@ -790,7 +790,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

-	*index = to_mucontext(context)->uuari.uars[0].index;
+	*index = to_mucontext(context)->bfregi.uars[0].index;

	if (ucmd.cqe_comp_en == 1) {
		if (unlikely((*cqe_size != 64) ||
@@ -886,7 +886,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

-	*index = dev->mdev->priv.uuari.uars[0].index;
+	*index = dev->mdev->priv.bfregi.uars[0].index;

	return 0;

drivers/infiniband/hw/mlx5/main.c  +41 −39
@@ -999,12 +999,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_ib_ucontext *context;
-	struct mlx5_uuar_info *uuari;
+	struct mlx5_bfreg_info *bfregi;
	struct mlx5_uar *uars;
-	int gross_uuars;
+	int gross_bfregs;
	int num_uars;
	int ver;
-	int uuarn;
+	int bfregn;
	int err;
	int i;
	size_t reqlen;
@@ -1032,10 +1032,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	if (req.flags)
		return ERR_PTR(-EINVAL);

-	if (req.total_num_uuars > MLX5_MAX_UUARS)
+	if (req.total_num_bfregs > MLX5_MAX_BFREGS)
		return ERR_PTR(-ENOMEM);

-	if (req.total_num_uuars == 0)
+	if (req.total_num_bfregs == 0)
		return ERR_PTR(-EINVAL);

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
@@ -1046,13 +1046,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
				 reqlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

-	req.total_num_uuars = ALIGN(req.total_num_uuars,
-				    MLX5_NON_FP_BF_REGS_PER_PAGE);
-	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
+	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
+				    MLX5_NON_FP_BFREGS_PER_UAR);
+	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return ERR_PTR(-EINVAL);

-	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
-	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
+	num_uars = req.total_num_bfregs / MLX5_NON_FP_BFREGS_PER_UAR;
+	gross_bfregs = num_uars * MLX5_BFREGS_PER_UAR;
	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
@@ -1072,32 +1072,33 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	if (!context)
		return ERR_PTR(-ENOMEM);

-	uuari = &context->uuari;
-	mutex_init(&uuari->lock);
+	bfregi = &context->bfregi;
+	mutex_init(&bfregi->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

-	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
-				sizeof(*uuari->bitmap),
+	bfregi->bitmap = kcalloc(BITS_TO_LONGS(gross_bfregs),
+				sizeof(*bfregi->bitmap),
				GFP_KERNEL);
-	if (!uuari->bitmap) {
+	if (!bfregi->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
-	 * clear all fast path uuars
+	 * clear all fast path bfregs
	 */
-	for (i = 0; i < gross_uuars; i++) {
-		uuarn = i & 3;
-		if (uuarn == 2 || uuarn == 3)
-			set_bit(i, uuari->bitmap);
+	for (i = 0; i < gross_bfregs; i++) {
+		bfregn = i & 3;
+		if (bfregn == 2 || bfregn == 3)
+			set_bit(i, bfregi->bitmap);
	}

-	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
-	if (!uuari->count) {
+	bfregi->count = kcalloc(gross_bfregs,
+				sizeof(*bfregi->count), GFP_KERNEL);
+	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}
@@ -1130,7 +1131,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

-	resp.tot_uuars = req.total_num_uuars;
+	resp.tot_bfregs = req.total_num_bfregs;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
@@ -1163,10 +1164,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	if (err)
		goto out_td;

-	uuari->ver = ver;
-	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
-	uuari->uars = uars;
-	uuari->num_uars = num_uars;
+	bfregi->ver = ver;
+	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
+	bfregi->uars = uars;
+	bfregi->num_uars = num_uars;
	context->cqe_version = resp.cqe_version;

	return &context->ibucontext;
@@ -1182,10 +1183,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
-	kfree(uuari->count);
+	kfree(bfregi->count);

out_bitmap:
-	kfree(uuari->bitmap);
+	kfree(bfregi->bitmap);

out_uar_ctx:
	kfree(uars);
@@ -1199,7 +1200,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
-	struct mlx5_uuar_info *uuari = &context->uuari;
+	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int i;

	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
@@ -1207,14 +1208,15 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)

	free_page(context->upd_xlt_page);

-	for (i = 0; i < uuari->num_uars; i++) {
-		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
-			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
+	for (i = 0; i < bfregi->num_uars; i++) {
+		if (mlx5_cmd_free_uar(dev->mdev, bfregi->uars[i].index))
+			mlx5_ib_warn(dev, "Failed to free UAR 0x%x\n",
+				     bfregi->uars[i].index);
	}

-	kfree(uuari->count);
-	kfree(uuari->bitmap);
-	kfree(uuari->uars);
+	kfree(bfregi->count);
+	kfree(bfregi->bitmap);
+	kfree(bfregi->uars);
	kfree(context);

	return 0;
@@ -1377,7 +1379,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		    struct vm_area_struct *vma,
		    struct mlx5_ib_ucontext *context)
{
-	struct mlx5_uuar_info *uuari = &context->uuari;
+	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int err;
	unsigned long idx;
	phys_addr_t pfn, pa;
@@ -1408,10 +1410,10 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		return -EINVAL;

	idx = get_index(vma->vm_pgoff);
-	if (idx >= uuari->num_uars)
+	if (idx >= bfregi->num_uars)
		return -EINVAL;

-	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+	pfn = uar_index2pfn(dev, bfregi->uars[idx].index);
	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);

	vma->vm_page_prot = prot;
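Worked example (not in the patch) of the sizing arithmetic in mlx5_ib_alloc_ucontext() above, assuming MLX5_NON_FP_BFREGS_PER_UAR is 2 and MLX5_BFREGS_PER_UAR is 4: a request of total_num_bfregs = 16 is already aligned, so num_uars = 16 / 2 = 8 and gross_bfregs = 8 * 4 = 32; the setup loop then pre-sets bits 2 and 3 of every group of four, reserving the 16 fast-path bfregs so that only the 16 regular ones remain allocatable.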
drivers/infiniband/hw/mlx5/mlx5_ib.h  +3 −3
@@ -100,7 +100,7 @@ enum mlx5_ib_mad_ifc_flags {
};

enum {
-	MLX5_CROSS_CHANNEL_UUAR         = 0,
+	MLX5_CROSS_CHANNEL_BFREG         = 0,
};

enum {
@@ -120,7 +120,7 @@ struct mlx5_ib_ucontext {
	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
-	struct mlx5_uuar_info	uuari;
+	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
@@ -355,7 +355,7 @@ struct mlx5_ib_qp {
	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
-	int			uuarn;
+	int			bfregn;

	int			create_type;

drivers/infiniband/hw/mlx5/qp.c  +88 −88
@@ -475,12 +475,12 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
	return 1;
}

-static int first_med_uuar(void)
+static int first_med_bfreg(void)
{
	return 1;
}

-static int next_uuar(int n)
+static int next_bfreg(int n)
{
	n++;

@@ -490,45 +490,45 @@ static int next_uuar(int n)
	return n;
}

-static int num_med_uuar(struct mlx5_uuar_info *uuari)
+static int num_med_bfreg(struct mlx5_bfreg_info *bfregi)
{
	int n;

-	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
-		uuari->num_low_latency_uuars - 1;
+	n = bfregi->num_uars * MLX5_NON_FP_BFREGS_PER_UAR -
+		bfregi->num_low_latency_bfregs - 1;

	return n >= 0 ? n : 0;
}

-static int max_uuari(struct mlx5_uuar_info *uuari)
+static int max_bfregi(struct mlx5_bfreg_info *bfregi)
{
-	return uuari->num_uars * 4;
+	return bfregi->num_uars * 4;
}

-static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+static int first_hi_bfreg(struct mlx5_bfreg_info *bfregi)
{
	int med;
	int i;
	int t;

-	med = num_med_uuar(uuari);
-	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
+	med = num_med_bfreg(bfregi);
+	for (t = 0, i = first_med_bfreg();; i = next_bfreg(i)) {
		t++;
		if (t == med)
-			return next_uuar(i);
+			return next_bfreg(i);
	}

	return 0;
}

-static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_high_class_bfreg(struct mlx5_bfreg_info *bfregi)
{
	int i;

-	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
-		if (!test_bit(i, uuari->bitmap)) {
-			set_bit(i, uuari->bitmap);
-			uuari->count[i]++;
+	for (i = first_hi_bfreg(bfregi); i < max_bfregi(bfregi); i = next_bfreg(i)) {
+		if (!test_bit(i, bfregi->bitmap)) {
+			set_bit(i, bfregi->bitmap);
+			bfregi->count[i]++;
			return i;
		}
	}
@@ -536,87 +536,87 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
	return -ENOMEM;
}

-static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_med_class_bfreg(struct mlx5_bfreg_info *bfregi)
{
-	int minidx = first_med_uuar();
+	int minidx = first_med_bfreg();
	int i;

-	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
-		if (uuari->count[i] < uuari->count[minidx])
+	for (i = first_med_bfreg(); i < first_hi_bfreg(bfregi); i = next_bfreg(i)) {
+		if (bfregi->count[i] < bfregi->count[minidx])
			minidx = i;
	}

-	uuari->count[minidx]++;
+	bfregi->count[minidx]++;
	return minidx;
}

-static int alloc_uuar(struct mlx5_uuar_info *uuari,
+static int alloc_bfreg(struct mlx5_bfreg_info *bfregi,
		       enum mlx5_ib_latency_class lat)
{
-	int uuarn = -EINVAL;
+	int bfregn = -EINVAL;

-	mutex_lock(&uuari->lock);
+	mutex_lock(&bfregi->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
-		uuarn = 0;
-		uuari->count[uuarn]++;
+		bfregn = 0;
+		bfregi->count[bfregn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
-		if (uuari->ver < 2)
-			uuarn = -ENOMEM;
+		if (bfregi->ver < 2)
+			bfregn = -ENOMEM;
		else
-			uuarn = alloc_med_class_uuar(uuari);
+			bfregn = alloc_med_class_bfreg(bfregi);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
-		if (uuari->ver < 2)
-			uuarn = -ENOMEM;
+		if (bfregi->ver < 2)
+			bfregn = -ENOMEM;
		else
-			uuarn = alloc_high_class_uuar(uuari);
+			bfregn = alloc_high_class_bfreg(bfregi);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
-		uuarn = 2;
+		bfregn = 2;
		break;
	}
-	mutex_unlock(&uuari->lock);
+	mutex_unlock(&bfregi->lock);

-	return uuarn;
+	return bfregn;
}

-static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+static void free_med_class_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn)
{
-	clear_bit(uuarn, uuari->bitmap);
-	--uuari->count[uuarn];
+	clear_bit(bfregn, bfregi->bitmap);
+	--bfregi->count[bfregn];
}

-static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+static void free_high_class_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn)
{
-	clear_bit(uuarn, uuari->bitmap);
-	--uuari->count[uuarn];
+	clear_bit(bfregn, bfregi->bitmap);
+	--bfregi->count[bfregn];
}

-static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+static void free_bfreg(struct mlx5_bfreg_info *bfregi, int bfregn)
{
-	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
-	int high_uuar = nuuars - uuari->num_low_latency_uuars;
+	int nbfregs = bfregi->num_uars * MLX5_BFREGS_PER_UAR;
+	int high_bfreg = nbfregs - bfregi->num_low_latency_bfregs;

-	mutex_lock(&uuari->lock);
-	if (uuarn == 0) {
-		--uuari->count[uuarn];
+	mutex_lock(&bfregi->lock);
+	if (bfregn == 0) {
+		--bfregi->count[bfregn];
		goto out;
	}

-	if (uuarn < high_uuar) {
-		free_med_class_uuar(uuari, uuarn);
+	if (bfregn < high_bfreg) {
+		free_med_class_bfreg(bfregi, bfregn);
		goto out;
	}

-	free_high_class_uuar(uuari, uuarn);
+	free_high_class_bfreg(bfregi, bfregn);

out:
-	mutex_unlock(&uuari->lock);
+	mutex_unlock(&bfregi->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
@@ -657,9 +657,9 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
			       struct mlx5_ib_cq *recv_cq);

-static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
+static int bfregn_to_uar_index(struct mlx5_bfreg_info *bfregi, int bfregn)
{
-	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
+	return bfregi->uars[bfregn / MLX5_BFREGS_PER_UAR].index;
}

static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
@@ -776,7 +776,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
	int uar_index;
	int npages;
	u32 offset = 0;
-	int uuarn;
+	int bfregn;
	int ncont = 0;
	__be64 *pas;
	void *qpc;
@@ -794,27 +794,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
	 */
	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		/* In CROSS_CHANNEL CQ and QP must use the same UAR */
-		uuarn = MLX5_CROSS_CHANNEL_UUAR;
+		bfregn = MLX5_CROSS_CHANNEL_BFREG;
	else {
-		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
-		if (uuarn < 0) {
-			mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
+		bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
+		if (bfregn < 0) {
+			mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
			mlx5_ib_dbg(dev, "reverting to medium latency\n");
-			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
-			if (uuarn < 0) {
-				mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+			bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
+			if (bfregn < 0) {
+				mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
				mlx5_ib_dbg(dev, "reverting to high latency\n");
-				uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
-				if (uuarn < 0) {
-					mlx5_ib_warn(dev, "uuar allocation failed\n");
-					return uuarn;
+				bfregn = alloc_bfreg(&context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
+				if (bfregn < 0) {
+					mlx5_ib_warn(dev, "bfreg allocation failed\n");
+					return bfregn;
				}
			}
		}
	}

-	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
-	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
+	uar_index = bfregn_to_uar_index(&context->bfregi, bfregn);
+	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -822,7 +822,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,

	err = set_user_buf_size(dev, qp, &ucmd, base, attr);
	if (err)
-		goto err_uuar;
+		goto err_bfreg;

	if (ucmd.buf_addr && ubuffer->buf_size) {
		ubuffer->buf_addr = ucmd.buf_addr;
@@ -831,7 +831,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
				       &ubuffer->umem, &npages, &page_shift,
				       &ncont, &offset);
		if (err)
-			goto err_uuar;
+			goto err_bfreg;
	} else {
		ubuffer->umem = NULL;
	}
@@ -854,8 +854,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
	MLX5_SET(qpc, qpc, page_offset, offset);

	MLX5_SET(qpc, qpc, uar_page, uar_index);
-	resp->uuar_index = uuarn;
-	qp->uuarn = uuarn;
+	resp->bfreg_index = bfregn;
+	qp->bfregn = bfregn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
@@ -882,8 +882,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
	if (ubuffer->umem)
		ib_umem_release(ubuffer->umem);

-err_uuar:
-	free_uuar(&context->uuari, uuarn);
+err_bfreg:
+	free_bfreg(&context->bfregi, bfregn);
	return err;
}

@@ -896,7 +896,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (base->ubuffer.umem)
		ib_umem_release(base->ubuffer.umem);
-	free_uuar(&context->uuari, qp->uuarn);
+	free_bfreg(&context->bfregi, qp->bfregn);
}

static int create_kernel_qp(struct mlx5_ib_dev *dev,
@@ -906,13 +906,13 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_qp_base *base)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
-	struct mlx5_uuar_info *uuari;
+	struct mlx5_bfreg_info *bfregi;
	int uar_index;
	void *qpc;
-	int uuarn;
+	int bfregn;
	int err;

-	uuari = &dev->mdev->priv.uuari;
+	bfregi = &dev->mdev->priv.bfregi;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
					IB_QP_CREATE_IPOIB_UD_LSO |
@@ -922,19 +922,19 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

-	uuarn = alloc_uuar(uuari, lc);
-	if (uuarn < 0) {
+	bfregn = alloc_bfreg(bfregi, lc);
+	if (bfregn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

-	qp->bf = &uuari->bfs[uuarn];
+	qp->bf = &bfregi->bfs[bfregn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
-		goto err_uuar;
+		goto err_bfreg;
	}

	qp->rq.offset = 0;
@@ -944,7 +944,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
	err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
-		goto err_uuar;
+		goto err_bfreg;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
@@ -1007,8 +1007,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

-err_uuar:
-	free_uuar(&dev->mdev->priv.uuari, uuarn);
+err_bfreg:
+	free_bfreg(&dev->mdev->priv.bfregi, bfregn);
	return err;
}

@@ -1021,7 +1021,7 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
	kfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);
	mlx5_buf_free(dev->mdev, &qp->buf);
-	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
+	free_bfreg(&dev->mdev->priv.bfregi, qp->bf->bfregn);
}

static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -1353,7 +1353,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
	if (init_attr->create_flags || init_attr->send_cq)
		return -EINVAL;

-	min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
+	min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
	if (udata->outlen < min_resp_len)
		return -EINVAL;

@@ -4132,7 +4132,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			__acquire(&bf->lock);

		/* TBD enable WC */
-		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
+		if (0 && nreq == 1 && bf->bfregn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
			/* wc_wmb(); */
		} else {
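Aside (not part of the patch): the create_user_qp() hunk above keeps the existing allocation fallback, only under the new names. Below is a condensed sketch of that chain, using the renamed alloc_bfreg() and latency classes from this diff; the wrapper name is hypothetical.

	/* hypothetical condensation of the fallback in create_user_qp() */
	static int pick_user_bfreg(struct mlx5_bfreg_info *bfregi)
	{
		int bfregn;

		bfregn = alloc_bfreg(bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
		if (bfregn >= 0)
			return bfregn;

		bfregn = alloc_bfreg(bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
		if (bfregn >= 0)
			return bfregn;

		/* LOW cannot fail: bfreg 0 is shared and only reference counted */
		return alloc_bfreg(bfregi, MLX5_IB_LATENCY_CLASS_LOW);
	}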
drivers/net/ethernet/mellanox/mlx5/core/eq.c  +4 −4
@@ -686,7 +686,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0],
				 "mlx5_cmd_eq", &dev->priv.bfregi.uars[0],
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
@@ -697,7 +697,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0],
				 "mlx5_async_eq", &dev->priv.bfregi.uars[0],
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
@@ -708,7 +708,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
-				 &dev->priv.uuari.uars[0],
+				 &dev->priv.bfregi.uars[0],
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
@@ -722,7 +722,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
					 MLX5_NUM_ASYNC_EQE,
					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
					 "mlx5_page_fault_eq",
-					 &dev->priv.uuari.uars[0],
+					 &dev->priv.bfregi.uars[0],
					 MLX5_EQ_TYPE_PF);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
Loading