
Commit 4bc2a9bf authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mlx4: Fix MTT leakage in resize CQ
  IB/ehca: Fix problem with generated flush work completions
  IB/ehca: Change misleading error message on memory hotplug
  mlx4_core: Save/restore default port IB capability mask
parents 6a121411 b0f43dcc
drivers/infiniband/hw/ehca/ehca_classes.h +3 −1
@@ -163,7 +163,8 @@ struct ehca_mod_qp_parm {
 /* struct for tracking if cqes have been reported to the application */
 struct ehca_qmap_entry {
 	u16 app_wr_id;
-	u16 reported;
+	u8 reported;
+	u8 cqe_req;
 };
 
 struct ehca_queue_map {
@@ -171,6 +172,7 @@ struct ehca_queue_map {
 	unsigned int entries;
 	unsigned int tail;
 	unsigned int left_to_poll;
+	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };
 
 struct ehca_qp {
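The header change splits the 16-bit reported field into two u8 flags, so a queue-map entry stays four bytes: reported marks completions already handed to the application, cqe_req marks work requests that asked for one. A minimal standalone C sketch of the resulting entry layout (illustrative names, not driver code):

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;
typedef unsigned short u16;

/* mirrors struct ehca_qmap_entry after the patch: still 4 bytes */
struct qmap_entry {
	u16 app_wr_id;
	u8 reported;	/* completion already handed to the application */
	u8 cqe_req;	/* work request asked for a completion */
};

int main(void)
{
	struct qmap_entry map[8];
	unsigned int i, entries = 8;

	/* the initial state reset_queue_map() establishes in the
	 * ehca_qp.c hunk below: all reported, no CQEs requested */
	memset(map, 0, sizeof(map));
	for (i = 0; i < entries; i++)
		map[i].reported = 1;

	printf("entry size: %zu bytes\n", sizeof(struct qmap_entry));
	return 0;
}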
drivers/infiniband/hw/ehca/ehca_main.c +1 −2
@@ -994,8 +994,7 @@ static int ehca_mem_notifier(struct notifier_block *nb,
 			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
 						   30 * 1000))
 				ehca_gen_err("DMEM operations are not allowed"
-					     "as long as an ehca adapter is"
-					     "attached to the LPAR");
+					     "in conjunction with eHCA");
 			return NOTIFY_BAD;
 		}
 	}
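The reworded error above is already rate-limited: printk_timed_ratelimit() lets the message through at most once per 30 seconds. A hedged userspace analogue of that once-per-interval pattern (timed_ratelimit() is a stand-in helper, not a kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* emit at most one message per interval, like printk_timed_ratelimit() */
static bool timed_ratelimit(long *last_ms, long interval_ms)
{
	struct timespec ts;
	long now_ms;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	now_ms = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
	if (!*last_ms || now_ms - *last_ms >= interval_ms) {
		*last_ms = now_ms;
		return true;	/* enough time has passed: log it */
	}
	return false;		/* suppress this one */
}

int main(void)
{
	long warn_time = 0;
	int i;

	for (i = 0; i < 3; i++)	/* only the first call prints */
		if (timed_ratelimit(&warn_time, 30 * 1000))
			fprintf(stderr, "DMEM operation rejected\n");
	return 0;
}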
drivers/infiniband/hw/ehca/ehca_qp.c +20 −6
@@ -435,9 +435,13 @@ static void reset_queue_map(struct ehca_queue_map *qmap)
 {
 	int i;
 
-	qmap->tail = 0;
-	for (i = 0; i < qmap->entries; i++)
+	qmap->tail = qmap->entries - 1;
+	qmap->left_to_poll = 0;
+	qmap->next_wqe_idx = 0;
+	for (i = 0; i < qmap->entries; i++) {
 		qmap->map[i].reported = 1;
+		qmap->map[i].cqe_req = 0;
+	}
 }
 
 /*
@@ -1121,6 +1125,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 	void *wqe_v;
 	u64 q_ofs;
 	u32 wqe_idx;
+	unsigned int tail_idx;
 
 	/* convert real to abs address */
 	wqe_p = wqe_p & (~(1UL << 63));
@@ -1133,12 +1138,17 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 		return -EFAULT;
 	}
 
+	tail_idx = (qmap->tail + 1) % qmap->entries;
 	wqe_idx = q_ofs / ipz_queue->qe_size;
-	if (wqe_idx < qmap->tail)
-		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
-	else
-		qmap->left_to_poll = wqe_idx - qmap->tail;
 
+	/* check all processed wqes, whether a cqe is requested or not */
+	while (tail_idx != wqe_idx) {
+		if (qmap->map[tail_idx].cqe_req)
+			qmap->left_to_poll++;
+		tail_idx = (tail_idx + 1) % qmap->entries;
+	}
+	/* save index in queue, where we have to start flushing */
+	qmap->next_wqe_idx = wqe_idx;
 	return 0;
 }
 
@@ -1185,10 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
 	} else {
 		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
 		my_qp->sq_map.left_to_poll = 0;
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+						my_qp->sq_map.entries;
 		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
 
 		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
 		my_qp->rq_map.left_to_poll = 0;
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+						my_qp->rq_map.entries;
 		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
 	}
 
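The calc_left_cqes() rework above replaces simple index arithmetic with a walk over the ring: since unsignaled send WQEs never produce CQEs, only slots whose cqe_req flag is set still count toward left_to_poll. A standalone sketch of that circular count (count_expected_cqes() is an illustrative helper, not the driver function):

#include <stdio.h>

/* count ring slots after `tail` and before `wqe_idx` that expect a CQE */
static unsigned int count_expected_cqes(const unsigned char *cqe_req,
					unsigned int entries,
					unsigned int tail,
					unsigned int wqe_idx)
{
	unsigned int idx = (tail + 1) % entries;
	unsigned int left = 0;

	while (idx != wqe_idx) {
		if (cqe_req[idx])
			left++;
		idx = (idx + 1) % entries;	/* wrap around the ring */
	}
	return left;
}

int main(void)
{
	/* slots 1, 3 and 4 were posted signaled; tail is 0, WQE 5 failed */
	unsigned char cqe_req[8] = { 0, 1, 0, 1, 1, 0, 0, 0 };

	printf("left_to_poll = %u\n",
	       count_expected_cqes(cqe_req, 8, 0, 5));	/* prints 3 */
	return 0;
}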
drivers/infiniband/hw/ehca/ehca_reqs.c +30 −21
@@ -179,6 +179,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
 	qmap_entry->reported = 0;
+	qmap_entry->cqe_req = 0;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -203,8 +204,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
 	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-	    && !hidden)
+	    && !hidden) {
 		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
+		qmap_entry->cqe_req = 1;
+	}
 
 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
 	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
@@ -569,6 +572,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
 		qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
 		qmap_entry->reported = 0;
+		qmap_entry->cqe_req = 1;
 
 		wqe_cnt++;
 	} /* eof for cur_recv_wr */
@@ -706,27 +710,34 @@ repoll:
 		goto repoll;
 	wc->qp = &my_qp->ib_qp;
 
+	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
+	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
+		/* We got a send completion. */
+		qmap = &my_qp->sq_map;
+	else
+		/* We got a receive completion. */
+		qmap = &my_qp->rq_map;
+
+	/* advance the tail pointer */
+	qmap->tail = qmap_tail_idx;
+
 	if (is_error) {
 		/*
 		 * set left_to_poll to 0 because in error state, we will not
 		 * get any additional CQEs
 		 */
-		ehca_add_to_err_list(my_qp, 1);
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+						my_qp->sq_map.entries;
 		my_qp->sq_map.left_to_poll = 0;
+		ehca_add_to_err_list(my_qp, 1);
 
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+						my_qp->rq_map.entries;
+		my_qp->rq_map.left_to_poll = 0;
 		if (HAS_RQ(my_qp))
 			ehca_add_to_err_list(my_qp, 0);
-		my_qp->rq_map.left_to_poll = 0;
 	}
 
-	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-		/* We got a send completion. */
-		qmap = &my_qp->sq_map;
-	else
-		/* We got a receive completion. */
-		qmap = &my_qp->rq_map;
-
 	qmap_entry = &qmap->map[qmap_tail_idx];
 	if (qmap_entry->reported) {
 		ehca_warn(cq->device, "Double cqe on qp_num=%#x",
@@ -738,10 +749,6 @@ repoll:
 	wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
 	qmap_entry->reported = 1;
 
-	/* this is a proper completion, we need to advance the tail pointer */
-	if (++qmap->tail == qmap->entries)
-		qmap->tail = 0;
-
 	/* if left_to_poll is decremented to 0, add the QP to the error list */
 	if (qmap->left_to_poll > 0) {
 		qmap->left_to_poll--;
@@ -805,13 +812,14 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 	else
 		qmap = &my_qp->rq_map;
 
-	qmap_entry = &qmap->map[qmap->tail];
+	qmap_entry = &qmap->map[qmap->next_wqe_idx];
+
 	while ((nr < num_entries) && (qmap_entry->reported == 0)) {
 		/* generate flush CQE */
 
 		memset(wc, 0, sizeof(*wc));
 
-		offset = qmap->tail * ipz_queue->qe_size;
+		offset = qmap->next_wqe_idx * ipz_queue->qe_size;
 		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
 		if (!wqe) {
 			ehca_err(cq->device, "Invalid wqe offset=%#lx on "
@@ -850,11 +858,12 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 
 		wc->qp = &my_qp->ib_qp;
 
-		/* mark as reported and advance tail pointer */
+		/* mark as reported and advance next_wqe pointer */
 		qmap_entry->reported = 1;
-		if (++qmap->tail == qmap->entries)
-			qmap->tail = 0;
-		qmap_entry = &qmap->map[qmap->tail];
+		qmap->next_wqe_idx++;
+		if (qmap->next_wqe_idx == qmap->entries)
+			qmap->next_wqe_idx = 0;
+		qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
 		wc++; nr++;
 	}
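With tail now advanced on every poll, generate_flush_cqes() starts flushing at next_wqe_idx and wraps at the end of the ring until it reaches an entry already reported. A standalone sketch of that walk-and-wrap loop (illustrative names, not the driver function):

#include <stdio.h>

struct entry {
	unsigned char reported;
};

/* flush unreported entries starting at *next_wqe_idx, wrapping at the end */
static int flush_walk(struct entry *map, unsigned int entries,
		      unsigned int *next_wqe_idx, int num_entries)
{
	int nr = 0;
	struct entry *e = &map[*next_wqe_idx];

	while (nr < num_entries && !e->reported) {
		/* the driver synthesizes a flush work completion here */
		e->reported = 1;
		if (++*next_wqe_idx == entries)
			*next_wqe_idx = 0;	/* wrap around the ring */
		e = &map[*next_wqe_idx];
		nr++;
	}
	return nr;	/* flush completions generated */
}

int main(void)
{
	struct entry map[4] = { {0}, {0}, {1}, {1} };
	unsigned int next = 0;

	printf("flushed %d, next_wqe_idx now %u\n",
	       flush_walk(map, 4, &next, 8), next);	/* flushed 2, idx 2 */
	return 0;
}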
drivers/infiniband/hw/mlx4/cq.c +5 −0
@@ -343,6 +343,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
 	struct mlx4_ib_cq *cq = to_mcq(ibcq);
+	struct mlx4_mtt mtt;
 	int outst_cqe;
 	int err;
 
@@ -376,10 +377,13 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 			goto out;
 	}
 
+	mtt = cq->buf.mtt;
+
 	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
 	if (err)
 		goto err_buf;
 
+	mlx4_mtt_cleanup(dev->dev, &mtt);
 	if (ibcq->uobject) {
 		cq->buf      = cq->resize_buf->buf;
 		cq->ibcq.cqe = cq->resize_buf->cqe;
@@ -406,6 +410,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	goto out;
 
 err_buf:
+	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
 	if (!ibcq->uobject)
 		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
 				    cq->resize_buf->cqe);
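The MTT leak fix follows a common resize pattern: stash the old handle before mlx4_cq_resize() points the CQ at the new MTT, then free exactly one of the two, the old MTT on success (it previously leaked) or the new one on the error path. A generic C sketch of that pattern, with malloc/free standing in for the mlx4 MTT calls:

#include <stdio.h>
#include <stdlib.h>

/* resize `*buf`, leaking neither the old nor the new allocation */
static int resize_buf(char **buf, size_t new_size, int simulate_failure)
{
	char *old = *buf;		/* stash the old handle first */
	char *new_buf = malloc(new_size);

	if (!new_buf || simulate_failure) {
		free(new_buf);		/* failure: drop the new resource */
		return -1;		/* ...and keep the old one intact */
	}
	*buf = new_buf;
	free(old);			/* success: old one would leak */
	return 0;
}

int main(void)
{
	char *buf = malloc(64);

	if (buf && resize_buf(&buf, 128, 0) == 0)
		printf("resized without leaking\n");
	free(buf);
	return 0;
}

On the driver's error path (err_buf above), the newly allocated MTT is the one cleaned up, mirroring free(new_buf) in the sketch.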