
Commit 7aa06bf5 authored by David S. Miller

Merge branch 'mlx5-next'



Eli Cohen says:

====================
mlx5 driver changes related to PCI handling

The first of these patches moves the PCI device driver from mlx5_ib to
mlx5_core, in a similar manner to how it is done in mlx4. This sets the ground
for us to introduce an Ethernet driver for HW which uses mlx5.

The other two patches contain minor fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4ada97ab 4d2f9bbb
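
Editor's note: concretely, moving the PCI device driver into mlx5_core means
mlx5_ib no longer calls pci_register_driver() itself; mlx5_core probes the HCA
and protocol drivers attach to the probed device through an
interface-registration API. Below is a minimal sketch of that pattern,
assuming the mlx5_interface / mlx5_register_interface API this series adds to
mlx5_core; the sketch_* names are hypothetical and the per-device context is
deliberately trivial, not the real mlx5_ib code.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>

/* Minimal per-device context for the sketch; the real driver allocates
 * and registers a full ib_device here. */
struct sketch_ib_dev {
	struct mlx5_core_dev *mdev;	/* pointer, not an embedded struct */
};

static void *sketch_add(struct mlx5_core_dev *mdev)
{
	struct sketch_ib_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	dev->mdev = mdev;	/* reference the core-owned device */
	return dev;		/* handed back to us in .remove */
}

static void sketch_remove(struct mlx5_core_dev *mdev, void *context)
{
	kfree(context);
}

static struct mlx5_interface sketch_interface = {
	.add	= sketch_add,
	.remove	= sketch_remove,
};

static int __init sketch_init(void)
{
	/* No pci_register_driver() in the protocol driver any more:
	 * mlx5_core owns the pci_driver, probes the HCA, and calls back
	 * into every registered interface. */
	return mlx5_register_interface(&sketch_interface);
}

static void __exit sketch_exit(void)
{
	mlx5_unregister_interface(&sketch_interface);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");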
+24 −24
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		struct mlx5_core_srq *msrq = NULL;

 		if (qp->ibqp.xrcd) {
-			msrq = mlx5_core_get_srq(&dev->mdev,
+			msrq = mlx5_core_get_srq(dev->mdev,
 						 be32_to_cpu(cqe->srqn));
 			srq = to_mibsrq(msrq);
 		} else {
@@ -348,7 +348,7 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 			   u16 tail, u16 head)
 {
-	int idx;
+	u16 idx;

 	do {
 		idx = tail & (qp->sq.wqe_cnt - 1);
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,

 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-	mlx5_buf_free(&dev->mdev, &buf->buf);
+	mlx5_buf_free(dev->mdev, &buf->buf);
 }

 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
-		mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
 		if (unlikely(!mqp)) {
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
 				     cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	case MLX5_CQE_SIG_ERR:
 		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

-		read_lock(&dev->mdev.priv.mr_table.lock);
-		mmr = __mlx5_mr_lookup(&dev->mdev,
+		read_lock(&dev->mdev->priv.mr_table.lock);
+		mmr = __mlx5_mr_lookup(dev->mdev,
 				       mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
 		if (unlikely(!mmr)) {
-			read_unlock(&dev->mdev.priv.mr_table.lock);
+			read_unlock(&dev->mdev->priv.mr_table.lock);
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
 				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
 			return -EINVAL;
@@ -536,7 +536,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 			     mr->sig->err_item.expected,
 			     mr->sig->err_item.actual);

-		read_unlock(&dev->mdev.priv.mr_table.lock);
+		read_unlock(&dev->mdev->priv.mr_table.lock);
 		goto repoll;
 	}

@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	mlx5_cq_arm(&to_mcq(ibcq)->mcq,
 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-		    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+		    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));

 	return 0;
 }
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
 	int err;

-	err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
 			     PAGE_SIZE * 2, &buf->buf);
 	if (err)
 		return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 {
 	int err;

-	err = mlx5_db_alloc(&dev->mdev, &cq->db);
+	err = mlx5_db_alloc(dev->mdev, &cq->db);
 	if (err)
 		return err;

@@ -716,7 +716,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

 	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	*index = dev->mdev.priv.uuari.uars[0].index;
+	*index = dev->mdev->priv.uuari.uars[0].index;

 	return 0;

@@ -724,14 +724,14 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	free_cq_buf(dev, &cq->buf);

 err_db:
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 	return err;
 }

 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
 	free_cq_buf(dev, &cq->buf);
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 }

 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 		return ERR_PTR(-EINVAL);

 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes)
+	if (entries > dev->mdev->caps.max_cqes)
 		return ERR_PTR(-EINVAL);

 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 	cqb->ctx.c_eqn = cpu_to_be16(eqn);
 	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

-	err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
 	if (err)
 		goto err_cqb;

@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 	return &cq->ibcq;

 err_cmd:
-	mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

 err_cqb:
 	mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
 	if (cq->uobject)
 		context = cq->uobject->context;

-	mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
 	if (context)
 		destroy_cq_user(mcq, context);
 	else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	int err;
 	u32 fsel;

-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
 		return -ENOSYS;

 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	in->ctx.cq_period = cpu_to_be16(cq_period);
 	in->ctx.cq_max_count = cpu_to_be16(cq_count);
 	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
 	kfree(in);

 	if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int uninitialized_var(cqe_size);
 	unsigned long flags;

-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		return -EINVAL;

 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes + 1)
+	if (entries > dev->mdev->caps.max_cqes + 1)
 		return -EINVAL;

 	if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
 	in->cqn = cpu_to_be32(cq->mcq.cqn);

-	err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
 	if (err)
 		goto ex_alloc;
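
Editor's note: the repeated &dev->mdev to dev->mdev churn above is fallout
from a single declaration change: once mlx5_core owns the PCI device,
mlx5_ib_dev no longer embeds the core device but keeps a pointer to it, so
the address-of operators disappear and "." accesses become "->". A minimal
sketch of the type change; only the mdev member is taken from the diff, the
rest of the struct is elided:

struct mlx5_ib_dev {
	struct ib_device	ib_dev;
	struct mlx5_core_dev	*mdev;	/* was: struct mlx5_core_dev mdev; */
	/* ... remaining members unchanged ... */
};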


+3 −3
@@ -41,7 +41,7 @@ enum {
 };

 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
 		 void *in_mad, void *response_mad)
 {
 	u8 op_modifier = 0;
@@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 	if (ignore_bkey || !in_wc)
 		op_modifier |= 0x2;

-	return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
+	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
 }

 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)

 	packet_error = be16_to_cpu(out_mad->status);

-	dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

 out:
+98 −197

File changed.

Preview size limit exceeded, changes collapsed.

+1 −1
@@ -148,7 +148,7 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
 	u64 off_mask;
 	u64 buf_off;

-	page_size = 1 << page_shift;
+	page_size = (u64)1 << page_shift;
 	page_mask = page_size - 1;
 	buf_off = addr & page_mask;
 	off_size = page_size >> 6;
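
Editor's note: the (u64) cast matters because the constant 1 is a 32-bit int,
so "1 << page_shift" is evaluated in 32-bit arithmetic and overflows for
page_shift >= 31 before the result is widened into the u64 page_size; casting
first makes the whole shift 64-bit. A standalone user-space illustration of
the hazard (not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int page_shift = 31;

	/* Bug: the shift happens in 32-bit int arithmetic (undefined
	 * behavior for shift >= 31) and only then is widened to 64 bits. */
	uint64_t bad  = 1 << page_shift;

	/* Fix: widen the operand first, then shift in 64-bit arithmetic. */
	uint64_t good = (uint64_t)1 << page_shift;

	printf("bad=%llx good=%llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}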
+2 −12

File changed.

Preview size limit exceeded, changes collapsed.
