Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aca361c1 authored by Linus Torvalds
Browse files

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (45 commits)
  [PATCH] Restore channel setting after scan.
  [PATCH] hostap: Fix memory leak on PCI probe error path
  [PATCH] hostap: Remove dead code (duplicated idx != 0)
  [PATCH] hostap: Fix unlikely read overrun in CIS parsing
  [PATCH] hostap: Fix double free in prism2_config() error path
  [PATCH] hostap: Fix ap_add_sta() return value verification
  [PATCH] hostap: Fix hw reset after CMDCODE_ACCESS_WRITE timeout
  [PATCH] wireless/airo: cache wireless scans
  [PATCH] wireless/airo: define default MTU
  [PATCH] wireless/airo: clean up printk usage to print device name
  [PATCH] WE-20 for kernel 2.6.16
  [PATCH] softmac: remove function_enter()
  [PATCH] skge: version 1.5
  [PATCH] skge: compute available ring buffers
  [PATCH] skge: dont free skb until multi-part transmit complete
  [PATCH] skge: multicast statistics fix
  [PATCH] skge: rx_reuse called twice
  [PATCH] skge: dont use dev_alloc_skb for rx buffs
  [PATCH] skge: align receive buffers
  [PATCH] sky2: dont need to use dev_kfree_skb_any
  ...
parents cec60620 9b7c8489
Loading
Loading
Loading
Loading
+54 −51
Original line number Original line Diff line number Diff line
@@ -44,7 +44,7 @@
#include "skge.h"
#include "skge.h"


#define DRV_NAME		"skge"
#define DRV_NAME		"skge"
#define DRV_VERSION		"1.4"
#define DRV_VERSION		"1.5"
#define PFX			DRV_NAME " "
#define PFX			DRV_NAME " "


#define DEFAULT_TX_RING_SIZE	128
#define DEFAULT_TX_RING_SIZE	128
@@ -357,7 +357,7 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev)
	skge->net_stats.rx_bytes = data[1];
	skge->net_stats.rx_bytes = data[1];
	skge->net_stats.tx_packets = data[2] + data[4] + data[6];
	skge->net_stats.tx_packets = data[2] + data[4] + data[6];
	skge->net_stats.rx_packets = data[3] + data[5] + data[7];
	skge->net_stats.rx_packets = data[3] + data[5] + data[7];
	skge->net_stats.multicast = data[5] + data[7];
	skge->net_stats.multicast = data[3] + data[5];
	skge->net_stats.collisions = data[10];
	skge->net_stats.collisions = data[10];
	skge->net_stats.tx_aborted_errors = data[12];
	skge->net_stats.tx_aborted_errors = data[12];


@@ -781,7 +781,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 * Note: DMA address is not changed by chip.
 * Note: DMA address is not changed by chip.
 * 	 MTU not changed while receiver active.
 * 	 MTU not changed while receiver active.
 */
 */
static void skge_rx_reuse(struct skge_element *e, unsigned int size)
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
{
	struct skge_rx_desc *rd = e->desc;
	struct skge_rx_desc *rd = e->desc;


@@ -829,7 +829,7 @@ static int skge_rx_fill(struct skge_port *skge)
	do {
	do {
		struct sk_buff *skb;
		struct sk_buff *skb;


		skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
		skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb)
		if (!skb)
			return -ENOMEM;
			return -ENOMEM;


@@ -847,7 +847,6 @@ static void skge_link_up(struct skge_port *skge)
		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);


	netif_carrier_on(skge->netdev);
	netif_carrier_on(skge->netdev);
	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
	netif_wake_queue(skge->netdev);
	netif_wake_queue(skge->netdev);


	if (netif_msg_link(skge))
	if (netif_msg_link(skge))
@@ -2155,7 +2154,7 @@ static int skge_up(struct net_device *dev)
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);


	if (dev->mtu > RX_BUF_SIZE)
	if (dev->mtu > RX_BUF_SIZE)
		skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
		skge->rx_buf_size = dev->mtu + ETH_HLEN;
	else
	else
		skge->rx_buf_size = RX_BUF_SIZE;
		skge->rx_buf_size = RX_BUF_SIZE;


@@ -2190,8 +2189,6 @@ static int skge_up(struct net_device *dev)
	if (err)
	if (err)
		goto free_rx_ring;
		goto free_rx_ring;


	skge->tx_avail = skge->tx_ring.count - 1;

	/* Initialize MAC */
	/* Initialize MAC */
	spin_lock_bh(&hw->phy_lock);
	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS)
	if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2294,6 +2291,12 @@ static int skge_down(struct net_device *dev)
	return 0;
	return 0;
}
}


/* Number of free slots in the transmit ring.  One slot is always
 * kept unused so a completely full ring can be told apart from a
 * completely empty one (to_clean == to_use would be ambiguous). */
static inline int skge_avail(const struct skge_ring *ring)
{
	int unused = ring->to_clean - ring->to_use;

	/* to_clean at or behind to_use means the in-use region wraps
	 * around the end of the ring; account for the full ring size. */
	if (ring->to_clean <= ring->to_use)
		unused += ring->count;

	return unused - 1;
}

static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_port *skge = netdev_priv(dev);
@@ -2314,7 +2317,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
		return NETDEV_TX_LOCKED;
		return NETDEV_TX_LOCKED;
	}
	}


	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
		if (!netif_queue_stopped(dev)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			netif_stop_queue(dev);


@@ -2390,8 +2393,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
		       dev->name, e - ring->start, skb->len);
		       dev->name, e - ring->start, skb->len);


	ring->to_use = e->next;
	ring->to_use = e->next;
	skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
	if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
	if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
		pr_debug("%s: transmit queue full\n", dev->name);
		pr_debug("%s: transmit queue full\n", dev->name);
		netif_stop_queue(dev);
		netif_stop_queue(dev);
	}
	}
@@ -2404,35 +2406,37 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
	return NETDEV_TX_OK;
	return NETDEV_TX_OK;
}
}


static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
{
{
	/* This ring element can be skb or fragment */
	struct pci_dev *pdev = skge->hw->pdev;
	if (e->skb) {
	struct skge_element *e;
		pci_unmap_single(hw->pdev,

			       pci_unmap_addr(e, mapaddr),
	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
			       pci_unmap_len(e, maplen),
		struct sk_buff *skb = e->skb;
			       PCI_DMA_TODEVICE);
		int i;
		dev_kfree_skb(e->skb);

		e->skb = NULL;
		e->skb = NULL;
	} else {
		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
		pci_unmap_page(hw->pdev,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
			       pci_unmap_addr(e, mapaddr),

			       pci_unmap_len(e, maplen),
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			e = e->next;
			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
				       PCI_DMA_TODEVICE);
		}
		}

		dev_kfree_skb(skb);
	}
	skge->tx_ring.to_clean = e;
}
}


static void skge_tx_clean(struct skge_port *skge)
static void skge_tx_clean(struct skge_port *skge)
{
{
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;


	spin_lock_bh(&skge->tx_lock);
	spin_lock_bh(&skge->tx_lock);
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
	skge_tx_complete(skge, skge->tx_ring.to_use);
		++skge->tx_avail;
	netif_wake_queue(skge->netdev);
		skge_tx_free(skge->hw, e);
	}
	ring->to_clean = e;
	spin_unlock_bh(&skge->tx_lock);
	spin_unlock_bh(&skge->tx_lock);
}
}


@@ -2592,7 +2596,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
		goto error;
		goto error;


	if (len < RX_COPY_THRESHOLD) {
	if (len < RX_COPY_THRESHOLD) {
		skb = dev_alloc_skb(len + 2);
		skb = alloc_skb(len + 2, GFP_ATOMIC);
		if (!skb)
		if (!skb)
			goto resubmit;
			goto resubmit;


@@ -2607,10 +2611,11 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
		skge_rx_reuse(e, skge->rx_buf_size);
		skge_rx_reuse(e, skge->rx_buf_size);
	} else {
	} else {
		struct sk_buff *nskb;
		struct sk_buff *nskb;
		nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
		nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
		if (!nskb)
		if (!nskb)
			goto resubmit;
			goto resubmit;


		skb_reserve(nskb, NET_IP_ALIGN);
		pci_unmap_single(skge->hw->pdev,
		pci_unmap_single(skge->hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 pci_unmap_len(e, maplen),
@@ -2661,30 +2666,29 @@ resubmit:
static void skge_tx_done(struct skge_port *skge)
static void skge_tx_done(struct skge_port *skge)
{
{
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	struct skge_element *e, *last;


	spin_lock(&skge->tx_lock);
	spin_lock(&skge->tx_lock);
	for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
	last = ring->to_clean;
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		struct skge_tx_desc *td = e->desc;
		u32 control;


		rmb();
		if (td->control & BMU_OWN)
		control = td->control;
		if (control & BMU_OWN)
			break;
			break;


		if (td->control & BMU_EOF) {
			last = e->next;
			if (unlikely(netif_msg_tx_done(skge)))
			if (unlikely(netif_msg_tx_done(skge)))
			printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
			       skge->netdev->name, e - ring->start, td->status);
				       skge->netdev->name, e - ring->start);

		skge_tx_free(skge->hw, e);
		e->skb = NULL;
		++skge->tx_avail;
		}
		}
	ring->to_clean = e;
	}

	skge_tx_complete(skge, last);

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);


	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
	if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
		netif_wake_queue(skge->netdev);
		netif_wake_queue(skge->netdev);


	spin_unlock(&skge->tx_lock);
	spin_unlock(&skge->tx_lock);
@@ -2718,8 +2722,7 @@ static int skge_poll(struct net_device *dev, int *budget)
			netif_receive_skb(skb);
			netif_receive_skb(skb);


			++work_done;
			++work_done;
		} else
		}
			skge_rx_reuse(e, skge->rx_buf_size);
	}
	}
	ring->to_clean = e;
	ring->to_clean = e;


+0 −1
Original line number Original line Diff line number Diff line
@@ -2418,7 +2418,6 @@ struct skge_port {
	int		     port;
	int		     port;


	spinlock_t	     tx_lock;
	spinlock_t	     tx_lock;
	u32		     tx_avail;
	struct skge_ring     tx_ring;
	struct skge_ring     tx_ring;
	struct skge_ring     rx_ring;
	struct skge_ring     rx_ring;


+4 −4
Original line number Original line Diff line number Diff line
@@ -1175,7 +1175,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
		/* just drop the packet if non-linear expansion fails */
		/* just drop the packet if non-linear expansion fails */
		if (skb_header_cloned(skb) &&
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb_any(skb);
			dev_kfree_skb(skb);
			goto out_unlock;
			goto out_unlock;
		}
		}


@@ -1324,7 +1324,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
				       PCI_DMA_TODEVICE);
				       PCI_DMA_TODEVICE);
		}
		}


		dev_kfree_skb_any(skb);
		dev_kfree_skb(skb);
	}
	}


	sky2->tx_cons = put;
	sky2->tx_cons = put;
@@ -2484,7 +2484,7 @@ static const struct sky2_stat {
	{ "single_collisions", GM_TXF_SNG_COL },
	{ "single_collisions", GM_TXF_SNG_COL },
	{ "multi_collisions", GM_TXF_MUL_COL },
	{ "multi_collisions", GM_TXF_MUL_COL },


	{ "rx_short",      GM_RXE_SHT },
	{ "rx_short",      GM_RXF_SHT },
	{ "rx_runt", 	   GM_RXE_FRAG },
	{ "rx_runt", 	   GM_RXE_FRAG },
	{ "rx_64_byte_packets", GM_RXF_64B },
	{ "rx_64_byte_packets", GM_RXF_64B },
	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
@@ -2607,7 +2607,7 @@ static struct net_device_stats *sky2_get_stats(struct net_device *dev)
	sky2->net_stats.rx_bytes = data[1];
	sky2->net_stats.rx_bytes = data[1];
	sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
	sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
	sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
	sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
	sky2->net_stats.multicast = data[5] + data[7];
	sky2->net_stats.multicast = data[3] + data[5];
	sky2->net_stats.collisions = data[10];
	sky2->net_stats.collisions = data[10];
	sky2->net_stats.tx_aborted_errors = data[12];
	sky2->net_stats.tx_aborted_errors = data[12];


+9 −0
Original line number Original line Diff line number Diff line
@@ -25,6 +25,15 @@ config NET_RADIO
	  the tools from
	  the tools from
	  <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
	  <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.


config NET_WIRELESS_RTNETLINK
	bool "Wireless Extension API over RtNetlink"
	---help---
	  Support the Wireless Extension API over the RtNetlink socket
	  in addition to the traditional ioctl interface (selected above).

	  For now, few tools use this facility, but it might grow in the
	  future. The only downside is that it adds 4.5 kB to your kernel.

# Note : the cards are obsolete (can't buy them anymore), but the drivers
# Note : the cards are obsolete (can't buy them anymore), but the drivers
# are not, as people are still using them...
# are not, as people are still using them...
comment "Obsolete Wireless cards support (pre-802.11)"
comment "Obsolete Wireless cards support (pre-802.11)"
+294 −161

File changed.

Preview size limit exceeded, changes collapsed.

Loading