Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 258daca2 authored by David S. Miller's avatar David S. Miller
Browse files
parents 3ed6f695 077887c3
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -409,6 +409,9 @@
#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */

/* Extended Interrupt Cause Set */
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
#define E1000_EITR_CNT_IGNR     0x80000000 /* Don't reset counters on write */


/* Transmit Descriptor Control */
/* Enable the counting of descriptors still to be processed. */
+36 −17
Original line number Diff line number Diff line
@@ -42,8 +42,11 @@

struct igb_adapter;

/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
#define IGB_START_ITR 648
/* Interrupt defines */
#define IGB_START_ITR                    648 /* ~6000 ints/sec */
#define IGB_4K_ITR                       980
#define IGB_20K_ITR                      196
#define IGB_70K_ITR                       56

/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD                  256
@@ -146,6 +149,7 @@ struct igb_tx_buffer {
	struct sk_buff *skb;
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;
	dma_addr_t dma;
	u32 length;
	u32 tx_flags;
@@ -174,15 +178,24 @@ struct igb_rx_queue_stats {
	u64 alloc_failed;
};

/* Per-direction (Rx or Tx) ring bookkeeping held inside a q_vector;
 * the byte/packet totals feed the interrupt-throttle (ITR) calculation.
 */
struct igb_ring_container {
	struct igb_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

struct igb_q_vector {
	struct igb_adapter *adapter;	/* backlink */
	struct igb_ring *rx_ring;
	struct igb_ring *tx_ring;
	struct napi_struct napi;
	int cpu;			/* CPU for DCA */
	u32 eims_value;			/* EIMS mask value */

	u32 eims_value;
	u16 cpu;
	u16 tx_work_limit;
	struct igb_ring_container rx, tx;

	struct napi_struct napi;
	int numa_node;

	u16 itr_val;
	u8 set_itr;
@@ -212,16 +225,12 @@ struct igb_ring {
	u16 next_to_clean ____cacheline_aligned_in_smp;
	u16 next_to_use;

	unsigned int total_bytes;
	unsigned int total_packets;

	union {
		/* TX */
		struct {
			struct igb_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
			bool detect_tx_hung;
		};
		/* RX */
		struct {
@@ -231,12 +240,14 @@ struct igb_ring {
	};
	/* Items past this point are only used during ring alloc / free */
	dma_addr_t dma;                /* phys address of the ring */
	int numa_node;                  /* node to alloc ring memory on */
};

#define IGB_RING_FLAG_RX_CSUM        0x00000001 /* RX CSUM enabled */
#define IGB_RING_FLAG_RX_SCTP_CSUM   0x00000002 /* SCTP CSUM offload enabled */

#define IGB_RING_FLAG_TX_CTX_IDX     0x00000001 /* HW requires context index */
/* Per-ring state/feature flags.
 * NOTE(review): these look like bit numbers for set_bit()/test_bit() on a
 * ring flags word rather than direct masks — confirm at the call sites.
 */
enum e1000_ring_flags_t {
	IGB_RING_FLAG_RX_SCTP_CSUM,	/* SCTP CSUM offload enabled */
	IGB_RING_FLAG_TX_CTX_IDX,	/* HW requires context index */
	IGB_RING_FLAG_TX_DETECT_HANG	/* run Tx hang detection on this ring */
};

#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

@@ -247,6 +258,13 @@ struct igb_ring {
#define IGB_TX_CTXTDESC(R, i)	    \
	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))

/* igb_test_staterr - tests bits within Rx descriptor status and error fields
 *
 * Converts the caller's CPU-order mask once and ANDs it against the
 * little-endian status_error word, avoiding a byte swap of the
 * descriptor field itself.  Non-zero means at least one bit was set.
 */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	__le32 le_mask = cpu_to_le32(stat_err_bits);

	return le_mask & rx_desc->wb.upper.status_error;
}

/* igb_desc_unused - calculate if we have unused descriptors */
static inline int igb_desc_unused(struct igb_ring *ring)
{
@@ -340,6 +358,7 @@ struct igb_adapter {
	int vf_rate_link_speed;
	u32 rss_queues;
	u32 wvbr;
	int node;
};

#define IGB_FLAG_HAS_MSI           (1 << 0)
+6 −8
Original line number Diff line number Diff line
@@ -1577,16 +1577,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *rx_buffer_info;
	struct igb_tx_buffer *tx_buffer_info;
	int rx_ntc, tx_ntc, count = 0;
	u32 staterr;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		/* check rx buffer */
		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];

@@ -1615,7 +1613,6 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,

		/* fetch next descriptor */
		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	/* re-map buffers to ring, store next to clean values */
@@ -1630,7 +1627,8 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	u16 i, j, lc, good_cnt;
	int ret_val = 0;
	unsigned int size = IGB_RX_HDR_LEN;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
@@ -2008,8 +2006,8 @@ static int igb_set_coalesce(struct net_device *netdev,

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->tx_work_limit = adapter->tx_work_limit;
		if (q_vector->rx_ring)
		q_vector->tx.work_limit = adapter->tx_work_limit;
		if (q_vector->rx.ring)
			q_vector->itr_val = adapter->rx_itr_setting;
		else
			q_vector->itr_val = adapter->tx_itr_setting;
+366 −309

File changed.

Preview size limit exceeded, changes collapsed.