Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3383a999 authored by Yuval Basson, committed by Greg Kroah-Hartman
Browse files

RDMA/qedr: SRQ's bug fixes

[ Upstream commit acca72e2b031b9fbb4184511072bd246a0abcebc ]

QPs with the same SRQ, working on different CQs and running in parallel
on different CPUs, could lead to a race when maintaining the SRQ consumer
count, which in turn leads to the FW running out of SRQs. Update the
consumer count atomically. Make sure the wqe_prod is updated after the
sge_prod, due to FW requirements.

Fixes: 3491c9e7 ("qedr: Add support for kernel mode SRQ's")
Link: https://lore.kernel.org/r/20200708195526.31040-1-ybason@marvell.com


Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Yuval Basson <ybason@marvell.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent dbc374d0
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -351,10 +351,10 @@ struct qedr_srq_hwq_info {
	u32 wqe_prod;
	u32 sge_prod;
	u32 wr_prod_cnt;
	u32 wr_cons_cnt;
	atomic_t wr_cons_cnt;
	u32 num_elems;

	u32 *virt_prod_pair_addr;
	struct rdma_srq_producers *virt_prod_pair_addr;
	dma_addr_t phy_prod_pair_addr;
};

+10 −12
Original line number Diff line number Diff line
@@ -3577,7 +3577,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
	 * count and consumer count and subtract it from max
	 * work request supported so that we get elements left.
	 */
	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);

	return hw_srq->max_wr - used;
}
@@ -3592,7 +3592,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
	unsigned long flags;
	int status = 0;
	u32 num_sge;
	u32 offset;

	spin_lock_irqsave(&srq->lock, flags);

@@ -3605,7 +3604,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		if (!qedr_srq_elem_left(hw_srq) ||
		    wr->num_sge > srq->hw_srq.max_sges) {
			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
			       hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
			       hw_srq->wr_prod_cnt,
			       atomic_read(&hw_srq->wr_cons_cnt),
			       wr->num_sge, srq->hw_srq.max_sges);
			status = -ENOMEM;
			*bad_wr = wr;
@@ -3639,22 +3639,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			hw_srq->sge_prod++;
		}

		/* Flush WQE and SGE information before
		/* Update WQE and SGE information before
		 * updating producer.
		 */
		wmb();
		dma_wmb();

		/* SRQ producer is 8 bytes. Need to update SGE producer index
		 * in first 4 bytes and need to update WQE producer in
		 * next 4 bytes.
		 */
		*srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
		offset = offsetof(struct rdma_srq_producers, wqe_prod);
		*((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
			hw_srq->wqe_prod;
		srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
		/* Make sure sge producer is updated first */
		dma_wmb();
		srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;

		/* Flush producer after updating it. */
		wmb();
		wr = wr->next;
	}

@@ -4077,7 +4075,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
	} else {
		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
	}
	srq->hw_srq.wr_cons_cnt++;
	atomic_inc(&srq->hw_srq.wr_cons_cnt);

	return 1;
}