Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a16152c8 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull rdma fixes from Doug Ledford:
 "One ocrdma fix:

   - The new CQ API support was added to ocrdma, but they got the arming
     logic wrong, so without this, transfers eventually fail when they
     fail to arm the interrupt properly under load

  Two related fixes for mlx4:

   - When we added the 64bit extended counters support to the core IB
     code, they forgot to update the RoCE side of the mlx4 driver (the
     IB side they properly updated).

     I debated whether or not to include these patches as they could be
     considered feature enablement patches, but the existing code will
     blindly copy the 32bit counters, whether any counters were requested
     at all (a bug).

     These two patches make it (a) check to see that counters were
     requested and (b) copy the right counters (the 64bit support is
     new, the 32bit is not).  For that reason I went ahead and took
     them"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/mlx4: Add support for the port info class for RoCE ports
  IB/mlx4: Add support for extended counters over RoCE ports
  RDMA/ocrdma: Fix arm logic to align with new cq API
parents 7ee302f6 c2bab619
Loading
Loading
Loading
Loading
+50 −13
Original line number Diff line number Diff line
@@ -817,9 +817,15 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static void edit_counter(struct mlx4_counter *cnt,
					struct ib_pma_portcounters *pma_cnt)
static void edit_counter(struct mlx4_counter *cnt, void *counters,
			 __be16 attr_id)
{
	switch (attr_id) {
	case IB_PMA_PORT_COUNTERS:
	{
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)counters;

		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
				     (be64_to_cpu(cnt->tx_bytes) >> 2));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
@@ -828,6 +834,31 @@ static void edit_counter(struct mlx4_counter *cnt,
				     be64_to_cpu(cnt->tx_frames));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
				     be64_to_cpu(cnt->rx_frames));
		break;
	}
	case IB_PMA_PORT_COUNTERS_EXT:
	{
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)counters;

		pma_cnt_ext->port_xmit_data =
			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
		pma_cnt_ext->port_rcv_data =
			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
		break;
	}
	}
}

/*
 * Build the reply for an IB_PMA_CLASS_PORT_INFO performance-management
 * query on a RoCE port.
 *
 * Fills a zero-initialized ib_class_port_info and copies it into the
 * outgoing MAD payload, advertising only IB_PMA_CLASS_CAP_EXT_WIDTH,
 * i.e. that extended (64-bit) port counter attributes are supported
 * (see the extended-counters support added in this change).
 *
 * @out_mad: destination within the outgoing MAD's data area; must be
 *           large enough to hold a struct ib_class_port_info
 *           (caller passes out_mad->data + 40).
 *
 * Returns IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY so the MAD layer
 * sends the populated out_mad back to the requester.
 */
static int iboe_process_mad_port_info(void *out_mad)
{
	struct ib_class_port_info cpi = {};

	/* Only capability advertised: extended-width (64-bit) counters. */
	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	memcpy(out_mad, &cpi, sizeof(cpi));
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
		return iboe_process_mad_port_info((void *)(out_mad->data + 40));

	memset(&counter_stats, 0, sizeof(counter_stats));
	mutex_lock(&dev->counters_table[port_num - 1].mutex);
	list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		switch (counter_stats.counter_mode & 0xf) {
		case 0:
			edit_counter(&counter_stats,
				     (void *)(out_mad->data + 40));
				     (void *)(out_mad->data + 40),
				     in_mad->mad_hdr.attr_id);
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
@@ -894,8 +929,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
	 */
	if (link == IB_LINK_LAYER_INFINIBAND) {
		if (mlx4_is_slave(dev->dev) &&
		    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
		    in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
		      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
						in_grh, in_mad, out_mad);

+0 −3
Original line number Diff line number Diff line
@@ -323,9 +323,6 @@ struct ocrdma_cq {
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool deferred_arm, deferred_sol;
	bool first_arm;

	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
						   * to cq polling
						   */
+4 −14
Original line number Diff line number Diff line
@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->first_arm = true;

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2910,12 +2909,9 @@ static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
	}
stop_cqe:
	cq->getp = cur_getp;
	if (cq->deferred_arm || polled_hw_cqes) {
		ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm,
				  cq->deferred_sol, polled_hw_cqes);
		cq->deferred_arm = false;
		cq->deferred_sol = false;
	}

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}
@@ -2999,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	if (cq->first_arm) {
	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
		cq->first_arm = false;
	}

	cq->deferred_arm = true;
	cq->deferred_sol = sol_needed;
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;