
Commit 0042f9e4 authored by Mark Bloch, committed by Doug Ledford

RDMA/mlx5: Enable vport loopback when user context or QP mandate



A user can create a QP which can accept loopback traffic, but that's not
enough. We need to enable loopback on the vport as well. Currently, vport
loopback is enabled only when more than one user is using the IB device.
Update the logic to consider whether a QP which supports loopback was
created; if so, enable vport loopback even if there is only a single user.
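
(Editorial note, not part of the commit message: the bookkeeping this patch
introduces can be followed through one possible sequence of events, assuming
the semantics shown in the diff below.)

/*
 * Illustrative sequence only -- not patch content:
 *
 *   user A allocates a TD   -> user_td = 1, qps = 0 -> loopback stays off
 *   user A creates an lb QP -> user_td = 1, qps = 1 -> loopback turned ON
 *   user B allocates a TD   -> user_td = 2, qps = 1 -> already on, no-op
 *   user A destroys the QP  -> user_td = 2, qps = 0 -> stays on (two users)
 *   user B frees its TD     -> user_td = 1, qps = 0 -> loopback turned OFF
 *
 * The dev->lb.enabled flag keeps the mlx5_nic_vport_update_local_lb()
 * call idempotent across these transitions.
 */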

Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 175edba8
drivers/infiniband/hw/mlx5/main.c: +28 −12
@@ -1571,28 +1571,44 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
 			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
 }
 
-static int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev)
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	int err = 0;
 
 	mutex_lock(&dev->lb.mutex);
-	dev->lb.user_td++;
+	if (td)
+		dev->lb.user_td++;
+	if (qp)
+		dev->lb.qps++;
 
-	if (dev->lb.user_td == 2)
-		err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+	if (dev->lb.user_td == 2 ||
+	    dev->lb.qps == 1) {
+		if (!dev->lb.enabled) {
+			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+			dev->lb.enabled = true;
+		}
+	}
 
 	mutex_unlock(&dev->lb.mutex);
 
 	return err;
 }
 
-static void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev)
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	mutex_lock(&dev->lb.mutex);
-	dev->lb.user_td--;
+	if (td)
+		dev->lb.user_td--;
+	if (qp)
+		dev->lb.qps--;
 
-	if (dev->lb.user_td < 2)
-		mlx5_nic_vport_update_local_lb(dev->mdev, false);
+	if (dev->lb.user_td == 1 &&
+	    dev->lb.qps == 0) {
+		if (dev->lb.enabled) {
+			mlx5_nic_vport_update_local_lb(dev->mdev, false);
+			dev->lb.enabled = false;
+		}
+	}
 
 	mutex_unlock(&dev->lb.mutex);
 }
@@ -1613,7 +1629,7 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return err;
 
-	return mlx5_ib_enable_lb(dev);
+	return mlx5_ib_enable_lb(dev, true, false);
 }
 
 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
@@ -1628,7 +1644,7 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return;
 
-	mlx5_ib_disable_lb(dev);
+	mlx5_ib_disable_lb(dev, true, false);
 }
 
 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
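
(Editorial summary of the call sites, condensed from the hunks in this
commit; not additional patch content.)

/* Transport-domain path, one per user context (main.c): */
mlx5_ib_enable_lb(dev, true, false);	/* mlx5_ib_alloc_transport_domain()   */
mlx5_ib_disable_lb(dev, true, false);	/* mlx5_ib_dealloc_transport_domain() */

/* Self-loopback QP path, TIRs created with self_lb_block set (qp.c): */
mlx5_ib_enable_lb(dev, false, true);	/* create_raw_packet_qp_tir(),
					 * create_rss_raw_qp_tir() */
mlx5_ib_disable_lb(dev, false, true);	/* destroy_raw_packet_qp_tir(),
					 * destroy_rss_raw_qp_tir() */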
drivers/infiniband/hw/mlx5/mlx5_ib.h: +4 −0
@@ -862,6 +862,8 @@ struct mlx5_ib_lb_state {
 	/* protect the user_td */
 	struct mutex		mutex;
 	u32			user_td;
+	int			qps;
+	bool			enabled;
 };
 
 struct mlx5_ib_dev {
@@ -1020,6 +1022,8 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
 int mlx5_ib_destroy_srq(struct ib_srq *srq);
 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata);
drivers/infiniband/hw/mlx5/qp.c: +27 −7
@@ -1256,6 +1256,16 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
 		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
 }
 
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+				      struct mlx5_ib_rq *rq,
+				      u32 qp_flags_en)
+{
+	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
+	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
+}
+
 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 				    struct mlx5_ib_rq *rq, u32 tdn,
 				    u32 *qp_flags_en)
@@ -1293,15 +1303,15 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
 
-	kvfree(in);
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
 
-	return err;
-}
+		if (err)
+			destroy_raw_packet_qp_tir(dev, rq, 0);
+	}
+	kvfree(in);
 
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
-				      struct mlx5_ib_rq *rq)
-{
-	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
+	return err;
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
@@ -1372,7 +1382,7 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 
 	if (qp->rq.wqe_cnt) {
-		destroy_raw_packet_qp_tir(dev, rq);
+		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en);
 		destroy_raw_packet_qp_rq(dev, rq);
 	}

@@ -1396,6 +1406,9 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
 
 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
+	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
 	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
 }

@@ -1606,6 +1619,13 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 create_tir:
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+	}
+
 	if (err)
 		goto err;
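
(Editorial note on the error handling in both TIR paths above: loopback
accounting is attempted only after the TIR exists, and a failure to enable
loopback rolls the TIR back so the caller sees a clean error. A condensed
sketch, using the identifiers from the patch:)

err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
	err = mlx5_ib_enable_lb(dev, false, true);
	if (err)
		/* qp_flags_en == 0 here, so the destroy helper skips
		 * mlx5_ib_disable_lb() and only destroys the TIR */
		destroy_raw_packet_qp_tir(dev, rq, 0);
}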