Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 348bfec2 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'qed-XDP-support'



Yuval Mintz says:

====================
qed*: Add XDP support

This patch series is intended to add XDP to the qede driver, although
it contains quite a bit of cleanups, refactorings and infrastructure
changes as well.

The content of this series can be roughly divided into:

 - Datapath improvements - mostly focused on having the datapath utilize
parameters which can be more tightly contained in cachelines.
Patches #1, #2, #8, #9 belong to this group.

 - Refactoring - done mostly in favour of XDP. Patches #3, #4, #5, #9.

 - Infrastructure changes - done in favour of XDP. Patches #6 and #7 belong
to this category [#7 being by far the biggest patch in the series].

 - Actual XDP support - last two patches [#10, #11].
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents f54b8cd6 cb6aeb07
Loading
Loading
Loading
Loading
+0 −12
Original line number Original line Diff line number Diff line
@@ -241,15 +241,6 @@ struct qed_hw_info {
	enum qed_wol_support b_wol_support;
	enum qed_wol_support b_wol_support;
};
};


struct qed_hw_cid_data {
	u32	cid;
	bool	b_cid_allocated;

	/* Additional identifiers */
	u16	opaque_fid;
	u8	vport_id;
};

/* maximun size of read/write commands (HW limit) */
/* maximun size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE        0x2000
#define DMAE_MAX_RW_SIZE        0x2000


@@ -416,9 +407,6 @@ struct qed_hwfn {


	struct qed_dcbx_info		*p_dcbx_info;
	struct qed_dcbx_info		*p_dcbx_info;


	struct qed_hw_cid_data		*p_tx_cids;
	struct qed_hw_cid_data		*p_rx_cids;

	struct qed_dmae_info		dmae_info;
	struct qed_dmae_info		dmae_info;


	/* QM init */
	/* QM init */
+4 −29
Original line number Original line Diff line number Diff line
@@ -134,15 +134,6 @@ void qed_resc_free(struct qed_dev *cdev)


	kfree(cdev->reset_stats);
	kfree(cdev->reset_stats);


	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		kfree(p_hwfn->p_tx_cids);
		p_hwfn->p_tx_cids = NULL;
		kfree(p_hwfn->p_rx_cids);
		p_hwfn->p_rx_cids = NULL;
	}

	for_each_hwfn(cdev, i) {
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];


@@ -425,23 +416,6 @@ int qed_resc_alloc(struct qed_dev *cdev)
	if (!cdev->fw_data)
	if (!cdev->fw_data)
		return -ENOMEM;
		return -ENOMEM;


	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids)
			goto alloc_no_mem;

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids)
			goto alloc_no_mem;
	}

	for_each_hwfn(cdev, i) {
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;
		u32 n_eqes, num_cons;
@@ -2283,12 +2257,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
{
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
	u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;


	if (!pp_virt_addr_tbl)
	if (!pp_virt_addr_tbl)
		return;
		return;


	if (!p_chain->pbl.p_virt_table)
	if (!p_pbl_virt)
		goto out;
		goto out;


	for (i = 0; i < page_cnt; i++) {
	for (i = 0; i < page_cnt; i++) {
@@ -2306,7 +2280,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	dma_free_coherent(&cdev->pdev->dev,
	dma_free_coherent(&cdev->pdev->dev,
			  pbl_size,
			  pbl_size,
			  p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
			  p_chain->pbl_sp.p_virt_table,
			  p_chain->pbl_sp.p_phys_table);
out:
out:
	vfree(p_chain->pbl.pp_virt_addr_tbl);
	vfree(p_chain->pbl.pp_virt_addr_tbl);
}
}
Loading