Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 726fdbe9 authored by Yuval Mintz, committed by David S. Miller
Browse files

qed: Encapsulate interrupt counters in struct



We already have an API struct that contains interrupt-related
numbers. Use it to encapsulate all information relating to the
status of SBs as (used|free).

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a333f7f3
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -2061,7 +2061,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
		feat_num[QED_VF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_L2_QUEUE),
						sb_cnt_info.sb_iov_cnt);
						sb_cnt_info.iov_cnt);
		feat_num[QED_PF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_SB) -
						non_l2_sbs,
@@ -2255,7 +2255,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
	case QED_SB:
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
		*p_resc_num = sb_cnt_info.sb_cnt;
		*p_resc_num = sb_cnt_info.cnt;
		break;
	default:
		return -EINVAL;
+9 −11
Original line number Diff line number Diff line
@@ -1769,7 +1769,7 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      bool b_set, bool b_slowpath)
{
	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->usage.cnt;
	u32 igu_sb_id = 0, val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
@@ -1827,7 +1827,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)

       /* Initialize base sb / sb cnt for PFs and VFs */
	p_igu_info->igu_base_sb = 0xffff;
	p_igu_info->igu_sb_cnt = 0;
	p_igu_info->igu_base_sb_iov = 0xffff;

	/* Distinguish between existent and non-existent default SB */
@@ -1856,7 +1855,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) {
				if (p_igu_info->igu_base_sb == 0xffff)
					p_igu_info->igu_base_sb = igu_sb_id;
				p_igu_info->igu_sb_cnt++;
				p_igu_info->usage.cnt++;
			}
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
@@ -1867,7 +1866,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)

			if (p_igu_info->igu_base_sb_iov == 0xffff)
				p_igu_info->igu_base_sb_iov = igu_sb_id;
			p_igu_info->free_blks++;
			p_igu_info->usage.iov_cnt++;
		}

		/* Mark the First entry belonging to the PF or its VFs
@@ -1900,12 +1899,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	}

	/* All non default SB are considered free at this point */
	p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov);
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}
@@ -2003,9 +2003,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
	if (!info || !p_sb_cnt_info)
		return;

	p_sb_cnt_info->sb_cnt		= info->igu_sb_cnt;
	p_sb_cnt_info->sb_iov_cnt	= info->igu_sb_cnt_iov;
	p_sb_cnt_info->sb_free_blk	= info->free_blks;
	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}

u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
@@ -2014,10 +2012,10 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)

	/* Determine origin of SB id */
	if ((sb_id >= p_info->igu_base_sb) &&
	    (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
	    (sb_id < p_info->igu_base_sb + p_info->usage.cnt)) {
		return sb_id - p_info->igu_base_sb;
	} else if ((sb_id >= p_info->igu_base_sb_iov) &&
		   (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
		   (sb_id < p_info->igu_base_sb_iov + p_info->usage.iov_cnt)) {
		/* We want the first VF queue to be adjacent to the
		 * last PF queue. Since L2 queues can be partial to
		 * SBs, we'll use the feature instead.
+5 −5
Original line number Diff line number Diff line
@@ -216,11 +216,11 @@ struct qed_igu_block {
struct qed_igu_info {
	struct qed_igu_block entry[MAX_TOT_SB_PER_PATH];
	u16 igu_dsb_id;

	u16 igu_base_sb;
	u16 igu_base_sb_iov;
	u16			igu_sb_cnt;
	u16			igu_sb_cnt_iov;
	u16			free_blks;
	struct qed_sb_cnt_info usage;

};

/* TODO Names of function may change... */
+1 −1
Original line number Diff line number Diff line
@@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

+4 −5
Original line number Diff line number Diff line
@@ -874,9 +874,9 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,

	igu_blocks = p_hwfn->hw_info.p_igu_info->entry;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
@@ -932,8 +932,7 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
Loading