Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a1342206 authored by Linus Torvalds
Browse files
parents b453257f 5523662c
Loading
Loading
Loading
Loading
+40 −33
Original line number Diff line number Diff line
@@ -426,9 +426,30 @@ static void tg3_enable_ints(struct tg3 *tp)
	tg3_cond_int(tp);
}

/* tg3_has_work() - report whether the NIC has pending events to service.
 *
 * Returns nonzero when a link-change event, a TX completion, or a newly
 * produced RX descriptor is waiting in the hardware status block.
 */
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* A link-change status bit counts as work only when link state is
	 * not being tracked via the link-change register or serdes polling.
	 */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES)) &&
	    (sblk->status & SD_STATUS_LINK_CHG))
		return 1;

	/* Pending TX completions or new RX descriptors? */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		return 1;

	return 0;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
@@ -437,7 +458,9 @@ static void tg3_restart_ints(struct tg3 *tp)
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	mmiowb();

	tg3_cond_int(tp);
	if (tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
@@ -2686,8 +2709,8 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 rx_rcb_ptr = tp->rx_rcb_ptr;
	u16 hw_idx, sw_idx;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
@@ -2696,7 +2719,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
	 * the opaque cookie.
	 */
	rmb();
	sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
@@ -2801,14 +2823,19 @@ static int tg3_rx(struct tg3 *tp, int budget)
next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		rx_rcb_ptr++;
		sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = rx_rcb_ptr;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
		     (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
@@ -2887,26 +2914,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
	return (done ? 0 : 1);
}

/* Report whether the NIC has pending events to service: a link-change
 * status bit, a TX completion, or a newly produced RX descriptor in the
 * hardware status block.  Returns nonzero when work is pending.
 * NOTE(review): the 'dev' parameter is not referenced in this body.
 */
static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	/* A link-change event only counts as work when link state is not
	 * tracked via the link-change register or serdes polling.
	 */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
@@ -2930,7 +2937,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	sblk->status &= ~SD_STATUS_UPDATED;

	if (likely(tg3_has_work(dev, tp)))
	if (likely(tg3_has_work(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */
	else {
		/* no work, re-enable interrupts
@@ -2977,7 +2984,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(tg3_has_work(dev, tp)))
		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* no work, shared interrupt perhaps?  re-enable
+2 −0
Original line number Diff line number Diff line
@@ -195,6 +195,8 @@ static inline int ip_finish_output2(struct sk_buff *skb)
	nf_debug_ip_finish_output2(skb);
#endif /*CONFIG_NETFILTER_DEBUG*/

	nf_reset(skb);

	if (hh) {
		int hh_alen;

+2 −2
Original line number Diff line number Diff line
@@ -252,7 +252,7 @@ static int find_pattern(const char *data, size_t dlen,
}

/* Look up to see if we're just after a \n. */
static int find_nl_seq(u16 seq, const struct ip_ct_ftp_master *info, int dir)
static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir)
{
	unsigned int i;

@@ -263,7 +263,7 @@ static int find_nl_seq(u16 seq, const struct ip_ct_ftp_master *info, int dir)
}

/* We don't update if it's older than what we have. */
static void update_nl_seq(u16 nl_seq, struct ip_ct_ftp_master *info, int dir)
static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir)
{
	unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;

+0 −7
Original line number Diff line number Diff line
@@ -431,13 +431,6 @@ static unsigned int ip_conntrack_defrag(unsigned int hooknum,
				        const struct net_device *out,
				        int (*okfn)(struct sk_buff *))
{
#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
	/* Previously seen (loopback)?  Ignore.  Do this before
           fragment check. */
	if ((*pskb)->nfct)
		return NF_ACCEPT;
#endif

	/* Gather fragments. */
	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
		*pskb = ip_ct_gather_frags(*pskb,
+0 −1
Original line number Diff line number Diff line
@@ -3517,7 +3517,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
		goto drop;

	th = skb->h.th;
	__skb_pull(skb, th->doff*4);

	TCP_ECN_accept_cwr(tp, skb);
Loading