Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f49809fe authored by Linus Torvalds's avatar Linus Torvalds
Browse files
parents cdbbde14 c1ef1f35
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1555,6 +1555,7 @@ config SIS900
	tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
	depends on NET_PCI && PCI
	select CRC32
	select MII
	---help---
	  This is a driver for the Fast Ethernet PCI network cards based on
	  the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
+86 −17
Original line number Diff line number Diff line
@@ -81,6 +81,7 @@
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *	                   capabilities.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
@@ -92,7 +93,7 @@
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.31"
#define FORCEDETH_VERSION		"0.32"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
@@ -109,6 +110,7 @@
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>

#include <asm/irq.h>
#include <asm/io.h>
@@ -1013,6 +1015,59 @@ static void nv_tx_timeout(struct net_device *dev)
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim of
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}

static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
@@ -1064,7 +1119,7 @@ static void nv_rx_process(struct net_device *dev)
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
			if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
				np->stats.rx_errors++;
				goto next_pkt;
			}
@@ -1078,22 +1133,24 @@ static void nv_rx_process(struct net_device *dev)
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX_ERROR) {
				/* framing errors are soft errors, the rest is fatal. */
			if (Flags & NV_RX_ERROR4) {
				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
				if (len < 0) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
			/* framing errors are soft errors. */
			if (Flags & NV_RX_FRAMINGERR) {
				if (Flags & NV_RX_SUBSTRACT1) {
					len--;
				}
				} else {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4)) {
			if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
				np->stats.rx_errors++;
				goto next_pkt;
			}
@@ -1107,16 +1164,18 @@ static void nv_rx_process(struct net_device *dev)
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX2_ERROR) {
				/* framing errors are soft errors, the rest is fatal. */
			if (Flags & NV_RX2_ERROR4) {
				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
				if (len < 0) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
			/* framing errors are soft errors */
			if (Flags & NV_RX2_FRAMINGERR) {
				if (Flags & NV_RX2_SUBSTRACT1) {
					len--;
				}
				} else {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
			Flags &= NV_RX2_CHECKSUMMASK;
			if (Flags == NV_RX2_CHECKSUMOK1 ||
@@ -1480,6 +1539,13 @@ static void nv_do_nic_poll(unsigned long data)
	enable_irq(dev->irq);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: reuse the timer-driven poll routine, which receives the
 * device pointer packed into an unsigned long argument.
 */
static void nv_poll_controller(struct net_device *dev)
{
	unsigned long arg = (unsigned long) dev;

	nv_do_nic_poll(arg);
}
#endif

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = get_nvpriv(dev);
@@ -1962,6 +2028,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
+23 −9
Original line number Diff line number Diff line
@@ -924,7 +924,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,

	spin_lock_irqsave(&cnx->lock, flags);

	if (! cnx->state & VETH_STATE_READY)
	if (! (cnx->state & VETH_STATE_READY))
		goto drop;

	if ((skb->len - 14) > VETH_MAX_MTU)
@@ -1023,6 +1023,8 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)

	lpmask = veth_transmit_to_many(skb, lpmask, dev);

	dev->trans_start = jiffies;

	if (! lpmask) {
		dev_kfree_skb(skb);
	} else {
@@ -1262,13 +1264,18 @@ static void veth_receive(struct veth_lpar_connection *cnx,

		vlan = skb->data[9];
		dev = veth_dev[vlan];
		if (! dev)
			/* Some earlier versions of the driver sent
			   broadcasts down all connections, even to
			   lpars that weren't on the relevant vlan.
			   So ignore packets belonging to a vlan we're
			   not on. */
		if (! dev) {
			/*
			 * Some earlier versions of the driver sent
			 * broadcasts down all connections, even to lpars
			 * that weren't on the relevant vlan. So ignore
			 * packets belonging to a vlan we're not on.
			 * We can also be here if we receive packets while
			 * the driver is going down, because then dev is NULL.
			 */
			dev_kfree_skb_irq(skb);
			continue;
		}

		port = (struct veth_port *)dev->priv;
		dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
@@ -1381,18 +1388,25 @@ void __exit veth_module_cleanup(void)
{
	int i;

	vio_unregister_driver(&veth_driver);
	/* Stop the queues first to stop any new packets being sent. */
	for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++)
		if (veth_dev[i])
			netif_stop_queue(veth_dev[i]);

	/* Stop the connections before we unregister the driver. This
	 * ensures there's no skbs lying around holding the device open. */
	for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
		veth_stop_connection(i);

	HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);

	/* Hypervisor callbacks may have scheduled more work while we
	 * were destroying connections. Now that we've disconnected from
 *	 * were stopping connections. Now that we've disconnected from
	 * the hypervisor make sure everything's finished. */
	flush_scheduled_work();

	vio_unregister_driver(&veth_driver);

	for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
		veth_destroy_connection(i);

+3 −3
Original line number Diff line number Diff line
@@ -2433,8 +2433,8 @@ static void __set_rx_mode(struct net_device *dev)
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
		for (i = 0; i < 64; i += 2) {
			writew(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writew((mc_filter[i+1]<<8) + mc_filter[i],
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
			       ioaddr + RxFilterData);
		}
	}
+29 −40
Original line number Diff line number Diff line
#define _VERSION "0.20"
#define VERSION "0.22"
/* ns83820.c by Benjamin LaHaise with contributions.
 *
 * Questions/comments/discussion to linux-ns83820@kvack.org.
@@ -63,9 +63,11 @@
 *			     -	fix missed txok introduced during performance
 *				tuning
 *			0.20 -	fix stupid RFEN thinko.  i am such a smurf.
 *
 *	20040828	0.21 -	add hardware vlan acceleration
 *				by Neil Horman <nhorman@redhat.com>
 *	20050406	0.22 -	improved DAC ifdefs from Andi Kleen	
 *			     -	removal of dead code from Adrian Bunk
 *			     -	fix half duplex collision behaviour
 * Driver Overview
 * ===============
 *
@@ -129,18 +131,6 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
#undef Dprintk
#define	Dprintk			dprintk

#if defined(CONFIG_HIGHMEM64G) || defined(__ia64__)
#define USE_64BIT_ADDR	"+"
#endif

#if defined(USE_64BIT_ADDR)
#define	VERSION	_VERSION USE_64BIT_ADDR
#define TRY_DAC	1
#else
#define	VERSION	_VERSION
#define TRY_DAC	0
#endif

/* tunables */
#define RX_BUF_SIZE	1500	/* 8192 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -386,22 +376,16 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
#define LINK_DOWN		0x02
#define LINK_UP			0x04

#ifdef USE_64BIT_ADDR
#define HW_ADDR_LEN	8
#define HW_ADDR_LEN	sizeof(dma_addr_t) 
#define desc_addr_set(desc, addr)				\
	do {							\
		u64 __addr = (addr);				\
		(desc)[0] = cpu_to_le32(__addr);		\
		(desc)[1] = cpu_to_le32(__addr >> 32);		\
		((desc)[0] = cpu_to_le32(addr));		\
		if (HW_ADDR_LEN == 8)		 		\
			(desc)[1] = cpu_to_le32(((u64)addr) >> 32);	\
	} while(0)
#define desc_addr_get(desc)					\
		(((u64)le32_to_cpu((desc)[1]) << 32)		\
		     | le32_to_cpu((desc)[0]))
#else
#define HW_ADDR_LEN	4
#define desc_addr_set(desc, addr)	((desc)[0] = cpu_to_le32(addr))
#define desc_addr_get(desc)		(le32_to_cpu((desc)[0]))
#endif
	(le32_to_cpu((desc)[0]) | \
	(HW_ADDR_LEN == 8 ? ((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0))

#define DESC_LINK		0
#define DESC_BUFPTR		(DESC_LINK + HW_ADDR_LEN/4)
@@ -727,11 +711,23 @@ static void fastcall phy_intr(struct net_device *ndev)
		speed = ((cfg / CFG_SPDSTS0) & 3);
		fullduplex = (cfg & CFG_DUPSTS);

		if (fullduplex)
		if (fullduplex) {
			new_cfg |= CFG_SB;
			writel(readl(dev->base + TXCFG)
					| TXCFG_CSI | TXCFG_HBI,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
			       dev->base + RXCFG);
		} else {
			writel(readl(dev->base + TXCFG)
					& ~(TXCFG_CSI | TXCFG_HBI),
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD),
			       dev->base + RXCFG);
		}

		if ((cfg & CFG_LNKSTS) &&
		    ((new_cfg ^ dev->CFG_cache) & CFG_MODE_1000)) {
		    ((new_cfg ^ dev->CFG_cache) != 0)) {
			writel(new_cfg, dev->base + CFG);
			dev->CFG_cache = new_cfg;
		}
@@ -1189,7 +1185,6 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)

	for (;;) {
		volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
		u32 residue = 0;

		dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
			(unsigned long long)buf);
@@ -1199,17 +1194,11 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
		desc_addr_set(desc + DESC_BUFPTR, buf);
		desc[DESC_EXTSTS] = cpu_to_le32(extsts);

		cmdsts = ((nr_frags|residue) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
		cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
		cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
		cmdsts |= len;
		desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);

		if (residue) {
			buf += len;
			len = residue;
			continue;
		}

		if (!nr_frags)
			break;

@@ -1841,7 +1830,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
	int using_dac = 0;

	/* See if we can set the dma mask early on; failure is fatal. */
	if (TRY_DAC && !pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
	if (sizeof(dma_addr_t) == 8 && 
	 	!pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
		using_dac = 1;
	} else if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
		using_dac = 0;
@@ -1972,9 +1962,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
	/* When compiled with 64 bit addressing, we must always enable
	 * the 64 bit descriptor format.
	 */
#ifdef USE_64BIT_ADDR
	if (sizeof(dma_addr_t) == 8) 
		dev->CFG_cache |= CFG_M64ADDR;
#endif
	if (using_dac)
		dev->CFG_cache |= CFG_T64ADDR;

Loading