
Commit c4cbb34b authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6

parents 0517deed b358492c

MAINTAINERS  +9 −1
@@ -2116,7 +2116,7 @@ M: reinette.chatre@intel.com
 L:	linux-wireless@vger.kernel.org
 L:	ipw3945-devel@lists.sourceforge.net
 W:	http://intellinuxwireless.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rchatre/iwlwifi-2.6.git
+T:	git kernel.org:/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6.git
 S:	Supported
 
 IOC3 ETHERNET DRIVER
@@ -3280,6 +3280,7 @@ L: linux-wireless@vger.kernel.org
 L:	rt2400-devel@lists.sourceforge.net
 W:	http://rt2x00.serialmonkey.com/
 S:	Maintained
+T:	git kernel.org:/pub/scm/linux/kernel/git/ivd/rt2x00.git
 F:	drivers/net/wireless/rt2x00/
 
 RAMDISK RAM BLOCK DEVICE DRIVER
@@ -3342,6 +3343,13 @@ L: reiserfs-devel@vger.kernel.org
 W:	http://www.namesys.com
 S:	Supported
 
+RFKILL
+P:	Ivo van Doorn
+M:	IvDoorn@gmail.com
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	net/rfkill
+
 ROCKETPORT DRIVER
 P:	Comtrol Corp.
 W:	http://www.comtrol.com

drivers/net/b44.c  +26 −26
@@ -148,7 +148,7 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 						unsigned long offset,
 						enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_device(sdev->dev, dma_base,
+	dma_sync_single_range_for_device(sdev->dma_dev, dma_base,
 					 offset & dma_desc_align_mask,
 					 dma_desc_sync_size, dir);
 }
@@ -158,7 +158,7 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_cpu(sdev->dev, dma_base,
+	dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base,
 				      offset & dma_desc_align_mask,
 				      dma_desc_sync_size, dir);
 }
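
Every hunk in this file makes the same substitution: DMA-API calls that used to take the SSB core's own struct device (sdev->dev) now take sdev->dma_dev, the device the bus actually performs DMA through. A minimal sketch of the idea, assuming a reduced two-field view of struct ssb_device (struct and helper names here are hypothetical, not the driver's):

#include <linux/dma-mapping.h>

/* Hypothetical, reduced view of struct ssb_device: just the two
 * fields this diff cares about. */
struct ssb_device_slim {
	struct device *dev;	/* the SSB core's own device */
	struct device *dma_dev;	/* device to hand to the DMA API */
};

/* Map a TX buffer against the DMA device. Map, unmap and sync must
 * all use the same struct device, which is why the rename touches
 * every DMA call in the driver. */
static dma_addr_t map_tx_buf(struct ssb_device_slim *sdev,
			     void *buf, size_t len)
{
	return dma_map_single(sdev->dma_dev, buf, len, DMA_TO_DEVICE);
}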
@@ -613,7 +613,7 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		dma_unmap_single(bp->sdev->dev,
+		dma_unmap_single(bp->sdev->dma_dev,
 				 rp->mapping,
 				 skb->len,
 				 DMA_TO_DEVICE);
@@ -653,7 +653,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = dma_map_single(bp->sdev->dev, skb->data,
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 				 RX_PKT_BUF_SZ,
 				 DMA_FROM_DEVICE);
 
@@ -663,19 +663,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
 		/* Sigh... */
 		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dev, mapping,
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = dma_map_single(bp->sdev->dev, skb->data,
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 					 RX_PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
 		if (dma_mapping_error(mapping) ||
 			mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					     dest_idx * sizeof(dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
+	dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr),
 				   RX_PKT_BUF_SZ,
 				   DMA_FROM_DEVICE);
 }
@@ -772,7 +772,7 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		dma_sync_single_for_cpu(bp->sdev->dev, map,
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
 					    RX_PKT_BUF_SZ,
 					    DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
@@ -806,7 +806,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			dma_unmap_single(bp->sdev->dev, map,
+			dma_unmap_single(bp->sdev->dma_dev, map,
 					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
@@ -966,24 +966,24 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dev, mapping, len,
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 					DMA_TO_DEVICE);
 
 		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
 					 len, DMA_TO_DEVICE);
 		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dev, mapping,
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
 					 len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
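
The b44_start_xmit() hunk above is the driver's workaround for the chip's 30-bit DMA limit: a mapping that lands at or above 1GB is released and the payload is copied into a GFP_DMA skb, which is guaranteed to sit low enough. A condensed sketch of that retry, using this tree's one-argument dma_mapping_error() (the helper name and return convention are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an skb for TX, bouncing through ZONE_DMA when the address is
 * out of reach of a 30-bit engine. Returns 0 on success; *pskb may
 * be replaced by the bounce skb. */
static int b44_map_tx(struct b44 *bp, struct sk_buff **pskb,
		      dma_addr_t *mapping)
{
	struct sk_buff *skb = *pskb, *bounce_skb;
	unsigned int len = skb->len;

	*mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len,
				  DMA_TO_DEVICE);
	if (!dma_mapping_error(*mapping) && *mapping + len <= DMA_30BIT_MASK)
		return 0;

	if (!dma_mapping_error(*mapping))	/* mapped, but above 1GB */
		dma_unmap_single(bp->sdev->dma_dev, *mapping, len,
				 DMA_TO_DEVICE);

	bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
	if (!bounce_skb)
		return -ENOMEM;

	skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
	*mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
				  len, DMA_TO_DEVICE);
	if (dma_mapping_error(*mapping) || *mapping + len > DMA_30BIT_MASK) {
		if (!dma_mapping_error(*mapping))
			dma_unmap_single(bp->sdev->dma_dev, *mapping, len,
					 DMA_TO_DEVICE);
		dev_kfree_skb_any(bounce_skb);
		return -EIO;
	}

	dev_kfree_skb_any(skb);
	*pskb = bounce_skb;
	return 0;
}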
@@ -1082,7 +1082,7 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
 					DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
@@ -1094,7 +1094,7 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
 					DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
@@ -1117,12 +1117,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
 			                  DMA_TABLE_BYTES,
 			                  DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
 			                  DMA_TABLE_BYTES,
 			                  DMA_TO_DEVICE);
 
@@ -1144,24 +1144,24 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
 					DMA_TABLE_BYTES,
 					DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
 					    bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
 					DMA_TABLE_BYTES,
 					DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
 					    bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
@@ -1187,7 +1187,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1199,7 +1199,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
 			                    DMA_TABLE_BYTES,
 			                    DMA_BIDIRECTIONAL);
 
@@ -1214,7 +1214,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to dma_alloc_coherent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1226,7 +1226,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
 			                    DMA_TABLE_BYTES,
 			                    DMA_TO_DEVICE);
 
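The last four hunks touch b44's ring-buffer "HACK" fallback: when dma_alloc_coherent() fails (the comments note its GFP_DMA demand is stricter than the device needs), the driver falls back to ordinary kernel memory plus a streaming mapping, sets B44_FLAG_*_RING_HACK, and from then on syncs the ring by hand, as b44_init_rings() above shows. A sketch of the RX side under those assumptions, with error handling trimmed to the essentials (struct dma_desc is assumed to be the driver's descriptor type):

	/* Coherent allocation first; fall back to kzalloc plus a
	 * streaming mapping when the coherent pool cannot deliver. */
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		struct dma_desc *rx_ring = kzalloc(DMA_TABLE_BYTES, gfp);

		if (!rx_ring)
			goto out_err;
		bp->rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
						 DMA_TABLE_BYTES,
						 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bp->rx_ring_dma)) {
			kfree(rx_ring);
			goto out_err;
		}
		bp->rx_ring = rx_ring;
		/* every later CPU/device handoff now needs an explicit
		 * dma_sync_single_for_*() call */
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}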

drivers/net/ps3_gelic_wireless.c  +8 −3
@@ -512,13 +512,18 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
 		 data, len);
 	memset(ie_info, 0, sizeof(struct ie_info));
 
-	while (0 < data_left) {
+	while (2 <= data_left) {
 		item_id = *pos++;
 		item_len = *pos++;
+		data_left -= 2;
+
+		if (data_left < item_len)
+			break;
 
 		switch (item_id) {
 		case MFIE_TYPE_GENERIC:
-			if (!memcmp(pos, wpa_oui, OUI_LEN) &&
+			if ((OUI_LEN + 1 <= item_len) &&
+			    !memcmp(pos, wpa_oui, OUI_LEN) &&
 			    pos[OUI_LEN] == 0x01) {
 				ie_info->wpa.data = pos - 2;
 				ie_info->wpa.len = item_len + 2;
@@ -535,7 +540,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
 			break;
 		}
 		pos += item_len;
-		data_left -= item_len + 2;
+		data_left -= item_len;
 	}
 	pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__,
 		 ie_info->wpa.data, ie_info->wpa.len,
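
Both hunks harden gelic_wl_parse_ie() against malformed input: the loop now reads a two-byte id/length header only when two bytes actually remain, subtracts that header before checking the claimed element length against what is left, and refuses to dereference pos[OUI_LEN] unless the element can hold an OUI plus a subtype byte. The same bounds-checked TLV walk in isolation (a sketch; names are not the driver's, and 0xdd is the vendor-specific IE id that MFIE_TYPE_GENERIC denotes):

#include <linux/string.h>
#include <linux/types.h>

#define OUI_LEN 3

/* Walk id/len/value triplets without ever reading past data + len. */
static void parse_tlv(const u8 *data, size_t len, const u8 *wpa_oui)
{
	const u8 *pos = data;
	size_t data_left = len;
	u8 item_id, item_len;

	while (2 <= data_left) {		/* room for id + length */
		item_id = *pos++;
		item_len = *pos++;
		data_left -= 2;

		if (data_left < item_len)	/* truncated element: stop */
			break;

		if (item_id == 0xdd &&		/* vendor-specific IE */
		    OUI_LEN + 1 <= item_len &&	/* OUI + subtype fit */
		    !memcmp(pos, wpa_oui, OUI_LEN) &&
		    pos[OUI_LEN] == 0x01) {
			/* WPA IE found: starts at pos - 2,
			 * spans item_len + 2 bytes in total */
		}

		pos += item_len;
		data_left -= item_len;
	}
}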

drivers/net/wireless/b43/dma.c  +14 −13
@@ -373,10 +373,10 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = dma_map_single(ring->dev->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = dma_map_single(ring->dev->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_FROM_DEVICE);
 	}
 
@@ -388,9 +388,10 @@ static inline
 			  dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_TO_DEVICE);
 	} else {
-		dma_unmap_single(ring->dev->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_FROM_DEVICE);
 	}
 }
@@ -400,7 +401,7 @@ static inline
 				 dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_cpu(ring->dev->dev->dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				addr, len, DMA_FROM_DEVICE);
 }
 
@@ -409,7 +410,7 @@ static inline
 				    dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_device(ring->dev->dev->dev,
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 				   addr, len, DMA_FROM_DEVICE);
 }
 
@@ -425,7 +426,7 @@ static inline
 
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	struct device *dev = ring->dev->dev->dev;
+	struct device *dma_dev = ring->dev->dev->dma_dev;
 	gfp_t flags = GFP_KERNEL;
 
 	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
@@ -439,7 +440,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
+	ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
 					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
 		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
@@ -452,9 +453,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	struct device *dev = ring->dev->dev->dev;
+	struct device *dma_dev = ring->dev->dev->dma_dev;
 
-	dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
+	dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
 			  ring->descbase, ring->dmabase);
 }
 
@@ -854,7 +855,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->dev->dev,
+		dma_test = dma_map_single(dev->dev->dma_dev,
 					  ring->txhdr_cache,
 					  b43_txhdr_size(dev),
 					  DMA_TO_DEVICE);
@@ -869,7 +870,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = dma_map_single(dev->dev->dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
 						  ring->txhdr_cache,
 						  b43_txhdr_size(dev),
 						  DMA_TO_DEVICE);
@@ -883,7 +884,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			}
 		}
 
-		dma_unmap_single(dev->dev->dev,
+		dma_unmap_single(dev->dev->dma_dev,
 				 dma_test, b43_txhdr_size(dev),
 				 DMA_TO_DEVICE);
 	}

drivers/net/wireless/b43legacy/dma.c  +31 −24
@@ -393,11 +393,11 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
 	dma_addr_t dmaaddr;
 
 	if (tx)
-		dmaaddr = dma_map_single(ring->dev->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len,
 					 DMA_TO_DEVICE);
 	else
-		dmaaddr = dma_map_single(ring->dev->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len,
 					 DMA_FROM_DEVICE);
 
@@ -411,11 +411,11 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring,
 		      int tx)
 {
 	if (tx)
-		dma_unmap_single(ring->dev->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len,
 				 DMA_TO_DEVICE);
 	else
-		dma_unmap_single(ring->dev->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len,
 				 DMA_FROM_DEVICE);
 }
@@ -427,7 +427,7 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	dma_sync_single_for_cpu(ring->dev->dev->dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				addr, len, DMA_FROM_DEVICE);
 }
 
@@ -438,7 +438,7 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	dma_sync_single_for_device(ring->dev->dev->dev,
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 				   addr, len, DMA_FROM_DEVICE);
 }
 
@@ -458,9 +458,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
 
 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 {
-	struct device *dev = ring->dev->dev->dev;
+	struct device *dma_dev = ring->dev->dev->dma_dev;
 
-	ring->descbase = dma_alloc_coherent(dev, B43legacy_DMA_RINGMEMSIZE,
+	ring->descbase = dma_alloc_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
 					    &(ring->dmabase), GFP_KERNEL);
 	if (!ring->descbase) {
 		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
@@ -474,9 +474,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 
 static void free_ringmemory(struct b43legacy_dmaring *ring)
 {
-	struct device *dev = ring->dev->dev->dev;
+	struct device *dma_dev = ring->dev->dev->dma_dev;
 
-	dma_free_coherent(dev, B43legacy_DMA_RINGMEMSIZE,
+	dma_free_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
 			  ring->descbase, ring->dmabase);
 }
 
@@ -586,7 +586,8 @@ static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
 /* Check if a DMA mapping address is invalid. */
 static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 					 dma_addr_t addr,
-					size_t buffersize)
+					 size_t buffersize,
+					 bool dma_to_device)
 {
 	if (unlikely(dma_mapping_error(addr)))
 		return 1;
@@ -594,11 +595,11 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 	switch (ring->type) {
 	case B43legacy_DMA_30BIT:
 		if ((u64)addr + buffersize > (1ULL << 30))
-			return 1;
+			goto address_error;
 		break;
 	case B43legacy_DMA_32BIT:
 		if ((u64)addr + buffersize > (1ULL << 32))
-			return 1;
+			goto address_error;
 		break;
 	case B43legacy_DMA_64BIT:
 		/* Currently we can't have addresses beyond 64 bits in the kernel. */
@@ -607,6 +608,12 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 
 	/* The address is OK. */
 	return 0;
+
+address_error:
+	/* We can't support this address. Unmap it again. */
+	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+	return 1;
 }
 
 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
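
b43legacy_dma_mapping_error() now takes a dma_to_device flag so that an address which mapped successfully but lies outside the ring's reachable window is unmapped with the correct direction before the failure is reported; previously such mappings were simply leaked. Reduced to its shape (a sketch, not the driver's full four-type switch):

/* Validate a fresh mapping against the ring's addressing width.
 * An out-of-range address is still a live mapping and must be
 * released with the direction it was created with. */
static bool mapping_unusable(struct b43legacy_dmaring *ring,
			     dma_addr_t addr, size_t size,
			     bool dma_to_device)
{
	u64 limit = (ring->type == B43legacy_DMA_30BIT) ?
			(1ULL << 30) : (1ULL << 32);

	if (dma_mapping_error(addr))	/* never mapped: nothing to undo */
		return 1;
	if ((u64)addr + size <= limit)
		return 0;		/* address is usable */

	/* Mapped but unreachable: undo before reporting the error. */
	unmap_descbuffer(ring, addr, size, dma_to_device);
	return 1;
}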
@@ -626,7 +633,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
 		return -ENOMEM;
 	dmaaddr = map_descbuffer(ring, skb->data,
 				 ring->rx_buffersize, 0);
-	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 		/* ugh. try to realloc in zone_dma */
 		gfp_flags |= GFP_DMA;
 
@@ -639,7 +646,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
 					 ring->rx_buffersize, 0);
 	}
 
-	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
@@ -886,12 +893,12 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->dev->dev, ring->txhdr_cache,
+		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
 					  sizeof(struct b43legacy_txhdr_fw3),
 					  DMA_TO_DEVICE);
 
 		if (b43legacy_dma_mapping_error(ring, dma_test,
-					sizeof(struct b43legacy_txhdr_fw3))) {
+					sizeof(struct b43legacy_txhdr_fw3), 1)) {
 			/* ugh realloc */
 			kfree(ring->txhdr_cache);
 			ring->txhdr_cache = kcalloc(nr_slots,
@@ -900,17 +907,17 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = dma_map_single(dev->dev->dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
 					ring->txhdr_cache,
 					sizeof(struct b43legacy_txhdr_fw3),
 					DMA_TO_DEVICE);
 
 			if (b43legacy_dma_mapping_error(ring, dma_test,
-					sizeof(struct b43legacy_txhdr_fw3)))
+					sizeof(struct b43legacy_txhdr_fw3), 1))
 				goto err_kfree_txhdr_cache;
 		}
 
-		dma_unmap_single(dev->dev->dev,
+		dma_unmap_single(dev->dev->dma_dev,
 				 dma_test, sizeof(struct b43legacy_txhdr_fw3),
 				 DMA_TO_DEVICE);
 	}
@@ -1235,7 +1242,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
 					   sizeof(struct b43legacy_txhdr_fw3), 1);
 	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
-					sizeof(struct b43legacy_txhdr_fw3))) {
+					sizeof(struct b43legacy_txhdr_fw3), 1)) {
 		ring->current_slot = old_top_slot;
 		ring->used_slots = old_used_slots;
 		return -EIO;
@@ -1254,7 +1261,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
-	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
 		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
@@ -1268,7 +1275,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 		skb = bounce_skb;
 		meta->skb = skb;
 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -EIO;