
Commit 348bfec2 authored by David S. Miller

Merge branch 'qed-XDP-support'



Yuval Mintz says:

====================
qed*: Add XDP support

This patch series is intended to add XDP to the qede driver, although
it contains quite a bit of cleanups, refactorings and infrastructure
changes as well.

The content of this series can be roughly divided into:

 - Datapath improvements - mostly focused on having the datapath utilize
parameters which can be more tightly contained in cachelines.
Patches #1, #2, #8, #9 belong to this group.

 - Refactoring - done mostly in favour of XDP. Patches #3, #4, #5, #9.

 - Infrastructure changes - done in favour of XDP. Patches #6 and #7 belong
to this category [#7 being by far the biggest patch in the series].

 - Actual XDP support - last two patches [#10, #11].
====================
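For readers unfamiliar with XDP: once qede gains native XDP support, the kernel invokes a BPF program on every received frame and acts on its verdict. The snippet below is a generic, minimal XDP program added here purely for illustration; it is not part of this series, and the program name, file name and build command are assumptions (a typical build is: clang -O2 -target bpf -c xdp_pass.c -o xdp_pass.o, with libbpf headers installed).

/* xdp_pass.c - minimal XDP program sketch; passes every frame to the stack. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	/* A real program would parse ctx->data .. ctx->data_end and
	 * return XDP_DROP, XDP_TX or XDP_PASS per packet.
	 */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Once built, such an object can be attached to an interface with, for example, iproute2's "ip link set dev <ifname> xdp obj xdp_pass.o sec xdp"; with this series applied, qede can service that attach natively in the driver rather than falling back to generic XDP.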

Signed-off-by: David S. Miller <davem@davemloft.net>
parents f54b8cd6 cb6aeb07
+0 −12
@@ -241,15 +241,6 @@ struct qed_hw_info {
 	enum qed_wol_support b_wol_support;
 };
 
-struct qed_hw_cid_data {
-	u32	cid;
-	bool	b_cid_allocated;
-
-	/* Additional identifiers */
-	u16	opaque_fid;
-	u8	vport_id;
-};
-
 /* maximun size of read/write commands (HW limit) */
 #define DMAE_MAX_RW_SIZE        0x2000
 
@@ -416,9 +407,6 @@ struct qed_hwfn {

 	struct qed_dcbx_info		*p_dcbx_info;
 
-	struct qed_hw_cid_data		*p_tx_cids;
-	struct qed_hw_cid_data		*p_rx_cids;
-
 	struct qed_dmae_info		dmae_info;
 
 	/* QM init */
+4 −29
@@ -134,15 +134,6 @@ void qed_resc_free(struct qed_dev *cdev)

 	kfree(cdev->reset_stats);
 
-	for_each_hwfn(cdev, i) {
-		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-
-		kfree(p_hwfn->p_tx_cids);
-		p_hwfn->p_tx_cids = NULL;
-		kfree(p_hwfn->p_rx_cids);
-		p_hwfn->p_rx_cids = NULL;
-	}
-
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -425,23 +416,6 @@ int qed_resc_alloc(struct qed_dev *cdev)
 	if (!cdev->fw_data)
 		return -ENOMEM;
 
-	/* Allocate Memory for the Queue->CID mapping */
-	for_each_hwfn(cdev, i) {
-		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-		int tx_size = sizeof(struct qed_hw_cid_data) *
-				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
-		int rx_size = sizeof(struct qed_hw_cid_data) *
-				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
-		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
-		if (!p_hwfn->p_tx_cids)
-			goto alloc_no_mem;
-
-		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
-		if (!p_hwfn->p_rx_cids)
-			goto alloc_no_mem;
-	}
-
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 		u32 n_eqes, num_cons;
@@ -2283,12 +2257,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 {
 	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
 	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-	u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
 
 	if (!pp_virt_addr_tbl)
 		return;
 
-	if (!p_chain->pbl.p_virt_table)
+	if (!p_pbl_virt)
 		goto out;
 
 	for (i = 0; i < page_cnt; i++) {
@@ -2306,7 +2280,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
 	dma_free_coherent(&cdev->pdev->dev,
 			  pbl_size,
-			  p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+			  p_chain->pbl_sp.p_virt_table,
+			  p_chain->pbl_sp.p_phys_table);
 out:
 	vfree(p_chain->pbl.pp_virt_addr_tbl);
 }
+330 −265
File changed; diff collapsed (preview size limit exceeded).

+104 −29
@@ -78,11 +78,34 @@ struct qed_filter_mcast {
 	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
 };
 
-int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
-			     u16 rx_queue_id,
+/**
+ * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_rxq			Handler of queue to close
+ * @param eq_completion_only	If True completion will be on
+ *				EQe, if False completion will be
+ *				on EQe if p_hwfn opaque
+ *				different from the RXQ opaque
+ *				otherwise on CQe.
+ * @param cqe_completion	If True completion will be
+ *				receive on CQe.
+ * @return int
+ */
+int
+qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+		      void *p_rxq,
 		      bool eq_completion_only, bool cqe_completion);
 
-int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+/**
+ * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_txq - handle to Tx queue needed to be closed
+ *
+ * @return int
+ */
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
 
 enum qed_tpa_mode {
 	QED_TPA_MODE_NONE,
@@ -196,19 +219,19 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
  * @note At the moment - only used by non-linux VFs.
  *
  * @param p_hwfn
- * @param rx_queue_id		RX Queue ID
- * @param num_rxqs		Allow to update multiple rx
- *				queues, from rx_queue_id to
- *				(rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers	An array of queue handlers to be updated.
+ * @param num_rxqs              number of queues to update.
  * @param complete_cqe_flg	Post completion to the CQE Ring if set
  * @param complete_event_flg	Post completion to the Event Ring if set
  * @param comp_mode
  * @param p_comp_data
  *
  * @return int
  */
 
 int
 qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
-			    u16 rx_queue_id,
+			    void **pp_rxq_handlers,
 			    u8 num_rxqs,
 			    u8 complete_cqe_flg,
 			    u8 complete_event_flg,
@@ -217,27 +240,79 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,

 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 
-int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
-			   struct qed_sp_vport_start_params *p_params);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+struct qed_queue_cid {
+	/* 'Relative' is a relative term ;-). Usually the indices [not counting
+	 * SBs] would be PF-relative, but there are some cases where that isn't
+	 * the case - specifically for a PF configuring its VF indices it's
+	 * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+	 */
+	struct qed_queue_start_common_params rel;
+	struct qed_queue_start_common_params abs;
+	u32 cid;
+	u16 opaque_fid;
+
+	/* VFs queues are mapped differently, so we need to know the
+	 * relative queue associated with them [0-based].
+	 * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+	 * and not on the VF itself.
+	 */
+	bool is_vf;
+	u8 vf_qid;
+
+	/* Legacy VFs might have Rx producer located elsewhere */
+	bool b_legacy_vf;
+};
 
-int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+			       struct qed_queue_cid *p_cid);
+
+struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 					    u16 opaque_fid,
 					    u32 cid,
-				struct qed_queue_start_common_params *params,
-				u8 stats_id,
+					    u8 vf_qid,
+					    struct qed_queue_start_common_params
+					    *p_params);
+
+int
+qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+		       struct qed_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return int
+ */
+int
+qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+			 struct qed_queue_cid *p_cid,
 			 u16 bd_max_bytes,
 			 dma_addr_t bd_chain_phys_addr,
-				dma_addr_t cqe_pbl_addr,
-				u16 cqe_pbl_size, bool b_use_zone_a_prod);
+			 dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
 
-int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
-				u16  opaque_fid,
-				u32  cid,
-				struct qed_queue_start_common_params *p_params,
-				u8  stats_id,
-				dma_addr_t pbl_addr,
-				u16 pbl_size,
-				union qed_qm_pq_params *p_pq_params);
+/**
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return int
+ */
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+			 struct qed_queue_cid *p_cid,
+			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
 
 u8 qed_mcast_bin_from_mac(u8 *mac);
 
+2 −2
@@ -347,11 +347,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,

 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-		       p_hwfn->p_eq->chain.pbl.p_phys_table);
+		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
 	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
 	p_ramrod->event_ring_num_pages = page_cnt;
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-		       p_hwfn->p_consq->chain.pbl.p_phys_table);
+		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
 
 	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
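As a closing illustration of how the result of this series is consumed from userspace, the sketch below loads an XDP object and attaches it to an interface in driver (native) mode, which is what qede provides after the last two patches. It is an assumption-laden example, not part of the series: it uses a much newer libbpf API than this commit's era (bpf_xdp_attach, libbpf >= 1.0), and the interface name "eth0", object file "xdp_pass.o" and program name "xdp_pass_all" are placeholders.

/* attach_xdp.c - load an XDP object and attach it in driver mode.
 * Build (assumption): cc attach_xdp.c -lbpf -o attach_xdp
 */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex, prog_fd;

	ifindex = if_nametoindex("eth0");	/* placeholder interface name */
	if (!ifindex)
		return 1;

	obj = bpf_object__open_file("xdp_pass.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_pass_all");
	if (!prog)
		return 1;
	prog_fd = bpf_program__fd(prog);

	/* XDP_FLAGS_DRV_MODE requests native driver support - the mode this
	 * series adds to qede - instead of the generic (skb-based) fallback.
	 */
	if (bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL))
		return 1;

	printf("XDP program attached in driver mode\n");
	return 0;
}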