
Commit 9603b61d authored by Jack Morgenstein, committed by David S. Miller

mlx5: Move pci device handling from mlx5_ib to mlx5_core

In preparation for a new mlx5 device which is VPI (i.e., ports can be
either IB or ETH), move the pci device functionality from mlx5_ib
to mlx5_core.

This involves the following changes:
1. Move mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev
   is now an independent structure maintained by mlx5_core.
   mlx5_ib_dev now has a pointer to that struct.
   This requires changing many places where the core_dev
   struct was accessed via mlx5_ib_dev (such accesses now
   go through a pointer dereference).
2. All PCI initializations are now done in mlx5_core. Thus,
   it is now mlx5_core which calls pci_register_driver() (and
   not mlx5_ib, as was previously the case).
3. mlx5_ib now registers itself with mlx5_core as an "interface"
   driver. This is very similar to the mechanism employed for
   the mlx4 (ConnectX) driver. Once the HCA is initialized
   (by mlx5_core), it invokes the interface drivers to do
   their initializations; a sketch of this mechanism follows
   the list below.
4. There is a new event handler which the core registers:
   mlx5_core_event(). This event handler invokes the
   event handlers registered by the interfaces.
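
For readers unfamiliar with this pattern, the sketch below shows how an
interface driver such as mlx5_ib would plug into mlx5_core. Only the
struct mlx5_interface layout and the mlx5_register_interface() /
mlx5_unregister_interface() entry points come from this patch; the
"example_*" names and callback bodies are hypothetical stubs, not the
actual mlx5_ib code.

/* Hypothetical skeleton of an mlx5_core "interface" driver. */
#include <linux/module.h>
#include <linux/mlx5/driver.h>

static void *example_add(struct mlx5_core_dev *mdev)
{
	/* Called by mlx5_core once the HCA is initialized; the return
	 * value is kept by the core and handed back as 'context'. */
	return mdev;
}

static void example_remove(struct mlx5_core_dev *mdev, void *context)
{
	/* Tear down whatever example_add() created. */
}

static void example_event(struct mlx5_core_dev *mdev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	/* mlx5_core_event() (point 4 above) fans HCA events out to
	 * every registered interface through this callback. */
}

static struct mlx5_interface example_interface = {
	.add	= example_add,
	.remove	= example_remove,
	.event	= example_event,
};

static int __init example_init(void)
{
	return mlx5_register_interface(&example_interface);
}

static void __exit example_exit(void)
{
	mlx5_unregister_interface(&example_interface);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");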

Based on a patch by Eli Cohen <eli@mellanox.com>

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4ada97ab
+23 −23
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		struct mlx5_core_srq *msrq = NULL;
 
 		if (qp->ibqp.xrcd) {
-			msrq = mlx5_core_get_srq(&dev->mdev,
+			msrq = mlx5_core_get_srq(dev->mdev,
 						 be32_to_cpu(cqe->srqn));
 			srq = to_mibsrq(msrq);
 		} else {
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-	mlx5_buf_free(&dev->mdev, &buf->buf);
+	mlx5_buf_free(dev->mdev, &buf->buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
-		mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
 		if (unlikely(!mqp)) {
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
 				     cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	case MLX5_CQE_SIG_ERR:
 		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-		read_lock(&dev->mdev.priv.mr_table.lock);
-		mmr = __mlx5_mr_lookup(&dev->mdev,
+		read_lock(&dev->mdev->priv.mr_table.lock);
+		mmr = __mlx5_mr_lookup(dev->mdev,
 				       mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
 		if (unlikely(!mmr)) {
-			read_unlock(&dev->mdev.priv.mr_table.lock);
+			read_unlock(&dev->mdev->priv.mr_table.lock);
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
 				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
 			return -EINVAL;
@@ -536,7 +536,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 			     mr->sig->err_item.expected,
 			     mr->sig->err_item.actual);
 
-		read_unlock(&dev->mdev.priv.mr_table.lock);
+		read_unlock(&dev->mdev->priv.mr_table.lock);
 		goto repoll;
 	}
 
@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	mlx5_cq_arm(&to_mcq(ibcq)->mcq,
 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-		    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+		    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
 
 	return 0;
 }
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
 	int err;
 
-	err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
 			     PAGE_SIZE * 2, &buf->buf);
 	if (err)
 		return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 {
 	int err;
 
-	err = mlx5_db_alloc(&dev->mdev, &cq->db);
+	err = mlx5_db_alloc(dev->mdev, &cq->db);
 	if (err)
 		return err;
 
@@ -716,7 +716,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
 
 	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	*index = dev->mdev.priv.uuari.uars[0].index;
+	*index = dev->mdev->priv.uuari.uars[0].index;
 
 	return 0;
 
@@ -724,14 +724,14 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	free_cq_buf(dev, &cq->buf);
 
 err_db:
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 	return err;
 }
 
 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
 	free_cq_buf(dev, &cq->buf);
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 }
 
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 		return ERR_PTR(-EINVAL);
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes)
+	if (entries > dev->mdev->caps.max_cqes)
 		return ERR_PTR(-EINVAL);
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 	cqb->ctx.c_eqn = cpu_to_be16(eqn);
 	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
 
-	err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
 	if (err)
 		goto err_cqb;
 
@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 	return &cq->ibcq;
 
 err_cmd:
-	mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
 
 err_cqb:
 	mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
 	if (cq->uobject)
 		context = cq->uobject->context;
 
-	mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
 	if (context)
 		destroy_cq_user(mcq, context);
 	else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	int err;
 	u32 fsel;
 
-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
 		return -ENOSYS;
 
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	in->ctx.cq_period = cpu_to_be16(cq_period);
 	in->ctx.cq_max_count = cpu_to_be16(cq_count);
 	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
 	kfree(in);
 
 	if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int uninitialized_var(cqe_size);
 	unsigned long flags;
 
-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		return -EINVAL;
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes + 1)
+	if (entries > dev->mdev->caps.max_cqes + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
 	in->cqn = cpu_to_be32(cq->mcq.cqn);
 
-	err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
 	if (err)
 		goto ex_alloc;

+2 −2
@@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 	if (ignore_bkey || !in_wc)
 		op_modifier |= 0x2;
 
-	return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
+	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
 }
 
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
 	packet_error = be16_to_cpu(out_mad->status);
 
-	dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
+91 −190

File changed; preview size limit exceeded, changes collapsed.

+1 −11
@@ -360,7 +360,7 @@ struct mlx5_ib_resources {
 
 struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
-	struct mlx5_core_dev		mdev;
+	struct mlx5_core_dev		*mdev;
 	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
 	struct list_head		eqs_list;
 	int				num_ports;
@@ -454,16 +454,6 @@ static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
 	return container_of(ibah, struct mlx5_ib_ah, ibah);
 }
 
-static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
-{
-	return container_of(dev, struct mlx5_ib_dev, mdev);
-}
-
-static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
-{
-	return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
-}
-
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
+24 −24

File changed; preview size limit exceeded, changes collapsed.