Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ea72ab22 authored by Michael Buesch, committed by John W. Linville
Browse files

[PATCH] bcm43xx: sync with svn.berlios.de

parent 70e5e983
Loading
Loading
Loading
Loading
+89 −109
Original line number Diff line number Diff line
@@ -214,7 +214,9 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
		return -ENOMEM;
	}
	if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RINGMEMORY >1G\n");
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RINGMEMORY >1G "
				    "(0x%08x, len: %lu)\n",
		       ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
		dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
				  ring->vbase, ring->dmabase);
		return -ENOMEM;
@@ -261,13 +263,6 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
	return 0;
}

static inline int dmacontroller_rx_reset(struct bcm43xx_dmaring *ring)
{
	assert(!ring->tx);

	return bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base)
@@ -308,13 +303,6 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
	return 0;
}

static inline int dmacontroller_tx_reset(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);

	return bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
}

static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc *desc,
			       struct bcm43xx_dmadesc_meta *meta,
@@ -337,7 +325,9 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
	if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
		unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb_any(skb);
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RX SKB >1G\n");
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RX SKB >1G "
				    "(0x%08x, len: %u)\n",
		       dmaaddr, ring->rx_buffersize);
		return -ENOMEM;
	}
	meta->skb = skb;
@@ -365,7 +355,7 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc *desc = NULL;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
@@ -375,24 +365,20 @@ static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;

		assert(ring->used_slots <= ring->nr_slots);
	}
	ring->used_slots = ring->nr_slots;

	err = 0;
out:
	return err;

err_unwind:
	for ( ; i >= 0; i--) {
	for (i--; i >= 0; i--) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	ring->used_slots = 0;
	goto out;
}

@@ -442,13 +428,13 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		dmacontroller_tx_reset(ring);
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Transmit Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
				0x00000000);
	} else {
		dmacontroller_rx_reset(ring);
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Receive Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
@@ -509,8 +495,6 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
		ring->memoffset = 0;
#endif

	
	spin_lock_init(&ring->lock);
	ring->bcm = bcm;
	ring->nr_slots = nr_descriptor_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
@@ -578,22 +562,25 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
	bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring1);
	bcm->current_core->dma->rx_ring1 = NULL;
	bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
	bcm->current_core->dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
	bcm->current_core->dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
	bcm->current_core->dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
	bcm->current_core->dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
	bcm->current_core->dma->tx_ring0 = NULL;
	struct bcm43xx_dma *dma = bcm->current_core->dma;

	bcm43xx_destroy_dmaring(dma->rx_ring1);
	dma->rx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm->current_core->dma;
	struct bcm43xx_dmaring *ring;
	int err = -ENOMEM;

@@ -602,39 +589,39 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto out;
	bcm->current_core->dma->tx_ring0 = ring;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx0;
	bcm->current_core->dma->tx_ring1 = ring;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx1;
	bcm->current_core->dma->tx_ring2 = ring;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
				     BCM43xx_TXRING_SLOTS, 1);
	if (!ring)
		goto err_destroy_tx2;
	bcm->current_core->dma->tx_ring3 = ring;
	dma->tx_ring3 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
				     BCM43xx_RXRING_SLOTS, 0);
	if (!ring)
		goto err_destroy_tx3;
	bcm->current_core->dma->rx_ring0 = ring;
	dma->rx_ring0 = ring;

	if (bcm->current_core->rev < 5) {
		ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
					     BCM43xx_RXRING_SLOTS, 0);
		if (!ring)
			goto err_destroy_rx0;
		bcm->current_core->dma->rx_ring1 = ring;
		dma->rx_ring1 = ring;
	}

	dprintk(KERN_INFO PFX "DMA initialized\n");
@@ -643,26 +630,25 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
	bcm->current_core->dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
	bcm->current_core->dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
	bcm->current_core->dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
	bcm->current_core->dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
	bcm->current_core->dma->tx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static inline
u16 generate_cookie(struct bcm43xx_dmaring *ring,
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x0000;
@@ -693,24 +679,25 @@ u16 generate_cookie(struct bcm43xx_dmaring *ring,
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static inline
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm->current_core->dma;
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x0000:
		ring = bcm->current_core->dma->tx_ring0;
		ring = dma->tx_ring0;
		break;
	case 0x1000:
		ring = bcm->current_core->dma->tx_ring1;
		ring = dma->tx_ring1;
		break;
	case 0x2000:
		ring = bcm->current_core->dma->tx_ring2;
		ring = dma->tx_ring2;
		break;
	case 0x3000:
		ring = bcm->current_core->dma->tx_ring3;
		ring = dma->tx_ring3;
		break;
	default:
		assert(0);
@@ -721,7 +708,7 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
	return ring;
}

static inline void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	/* Everything is ready to start. Buffers are DMA mapped and
@@ -736,8 +723,7 @@ static inline void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
			(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
}

static inline
int dma_tx_fragment(struct bcm43xx_dmaring *ring,
static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_txb *txb,
			   u8 cur_frag)
@@ -777,7 +763,9 @@ int dma_tx_fragment(struct bcm43xx_dmaring *ring,
	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
		return_slot(ring, slot);
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA TX SKB >1G\n");
		printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA TX SKB >1G "
				    "(0x%08x, len: %u)\n",
		       meta->dmaaddr, skb->len);
		return -ENOMEM;
	}

@@ -797,7 +785,7 @@ int dma_tx_fragment(struct bcm43xx_dmaring *ring,
	return 0;
}

static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
@@ -805,6 +793,7 @@ static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm->current_core->dma->tx_ring1;
	u8 i;
	struct sk_buff *skb;

@@ -818,8 +807,6 @@ static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
		return -ENOMEM;
	}

	assert(irqs_disabled());
	spin_lock(&ring->lock);
	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* We do not free the skb, as it is freed as
@@ -829,21 +816,11 @@ static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
		dma_tx_fragment(ring, skb, txb, i);
		//TODO: handle failure of dma_tx_fragment
	}
	spin_unlock(&ring->lock);

	return 0;
}

int fastcall
bcm43xx_dma_transfer_txb(struct bcm43xx_private *bcm,
			 struct ieee80211_txb *txb)
{
	return dma_transfer_txb(bcm->current_core->dma->tx_ring1,
				txb);
}

void fastcall
bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_dmaring *ring;
@@ -855,9 +832,6 @@ bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
	ring = parse_cookie(bcm, status->cookie, &slot);
	assert(ring);
	assert(ring->tx);
	assert(irqs_disabled());
	spin_lock(&ring->lock);

	assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
@@ -877,12 +851,9 @@ bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
		slot = next_slot(ring, slot);
	}
	bcm->stats.last_tx = jiffies;

	spin_unlock(&ring->lock);
}

static inline
void dma_rx(struct bcm43xx_dmaring *ring,
static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc *desc;
@@ -928,24 +899,37 @@ void dma_rx(struct bcm43xx_dmaring *ring,
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (len == 0)
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 1;
		s32 tmp = len - ring->rx_buffersize;
		int cnt = 0;
		s32 tmp = len;

		for ( ; tmp > 0; tmp -= ring->rx_buffersize) {
		while (1) {
			desc = ring->vbase + *slot;
			meta = ring->meta + *slot;
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small. %d dropped.\n",
		        cnt);
		printkl(KERN_ERR PFX "DMA RX buffer too small "
				     "(len: %u, buffer: %u, nr-dropped: %d)\n",
		        len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;
@@ -954,6 +938,8 @@ void dma_rx(struct bcm43xx_dmaring *ring,
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

@@ -971,8 +957,7 @@ void dma_rx(struct bcm43xx_dmaring *ring,
	return;
}

void fastcall
bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
@@ -982,9 +967,6 @@ bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
#endif

	assert(!ring->tx);
	assert(irqs_disabled());
	spin_lock(&ring->lock);

	status = bcm43xx_read32(ring->bcm, ring->mmio_base + BCM43xx_DMA_RX_STATUS);
	descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
	current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
@@ -1002,8 +984,6 @@ bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
			ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
			(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
	ring->current_slot = slot;

	spin_unlock(&ring->lock);
}

/* vim: set ts=8 sw=8 sts=8: */
+5 −6
Original line number Diff line number Diff line
@@ -122,7 +122,6 @@ struct bcm43xx_dmadesc_meta {
};

struct bcm43xx_dmaring {
	spinlock_t lock;
	struct bcm43xx_private *bcm;
	/* Kernel virtual base address of the ring memory. */
	struct bcm43xx_dmadesc *vbase;
@@ -166,11 +165,11 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 dmacontroller_mmio_base);

int FASTCALL(bcm43xx_dma_transfer_txb(struct bcm43xx_private *bcm,
				      struct ieee80211_txb *txb));
void FASTCALL(bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
					    struct bcm43xx_xmitstatus *status));
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status);

void FASTCALL(bcm43xx_dma_rx(struct bcm43xx_dmaring *ring));
int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb);
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);

#endif /* BCM43xx_DMA_H_ */
+3 −4
Original line number Diff line number Diff line
@@ -4097,7 +4097,6 @@ int fastcall bcm43xx_rx(struct bcm43xx_private *bcm,
	}

	frame_ctl = le16_to_cpu(wlhdr->frame_ctl);
	
	if ((frame_ctl & IEEE80211_FCTL_PROTECTED) && !bcm->ieee->host_decrypt) {
		frame_ctl &= ~IEEE80211_FCTL_PROTECTED;
		wlhdr->frame_ctl = cpu_to_le16(frame_ctl);		
@@ -4113,12 +4112,12 @@ int fastcall bcm43xx_rx(struct bcm43xx_private *bcm,
			skb_trim(skb, skb->len - 4);
			stats.len -= 8;
		}
		/* do _not_ use wlhdr again without reassigning it */
		wlhdr = (struct ieee80211_hdr_4addr *)(skb->data);
	}
	
	switch (WLAN_FC_GET_TYPE(frame_ctl)) {
	case IEEE80211_FTYPE_MGMT:
		ieee80211_rx_mgt(bcm->ieee, skb->data, &stats);
		ieee80211_rx_mgt(bcm->ieee, wlhdr, &stats);
		break;
	case IEEE80211_FTYPE_DATA:
		if (is_packet_for_us)
@@ -4143,7 +4142,7 @@ static inline int bcm43xx_tx(struct bcm43xx_private *bcm,
	if (bcm->pio_mode)
		err = bcm43xx_pio_transfer_txb(bcm, txb);
	else
		err = bcm43xx_dma_transfer_txb(bcm, txb);
		err = bcm43xx_dma_tx(bcm, txb);

	return err;
}
+1 −1
Original line number Diff line number Diff line
@@ -1161,7 +1161,7 @@ void bcm43xx_phy_lo_b_measure(struct bcm43xx_private *bcm)
	phy->minlowsigpos[1] += 0x0101;

	bcm43xx_phy_write(bcm, 0x002F, phy->minlowsigpos[1]);
	if (radio->version == 2053) {
	if (radio->version == 0x2053) {
		bcm43xx_phy_write(bcm, 0x000A, regstack[2]);
		bcm43xx_phy_write(bcm, 0x002A, regstack[3]);
		bcm43xx_phy_write(bcm, 0x0035, regstack[4]);
+4 −4
Original line number Diff line number Diff line
@@ -467,8 +467,8 @@ static void bcm43xx_calc_nrssi_offset(struct bcm43xx_private *bcm)
		bcm43xx_phy_write(bcm, 0x0003,
				  (bcm43xx_phy_read(bcm, 0x0003) & 0xFF9F)
				  | 0x0040);
		bcm43xx_phy_write(bcm, 0x007A,
				  bcm43xx_phy_read(bcm, 0x007A) | 0x000F);
		bcm43xx_radio_write16(bcm, 0x007A,
				      bcm43xx_radio_read16(bcm, 0x007A) | 0x000F);
		bcm43xx_set_all_gains(bcm, 3, 0, 1);
		bcm43xx_radio_write16(bcm, 0x0043,
				      (bcm43xx_radio_read16(bcm, 0x0043)
@@ -761,8 +761,8 @@ void bcm43xx_calc_nrssi_slope(struct bcm43xx_private *bcm)
		bcm43xx_phy_write(bcm, 0x0802,
				  bcm43xx_phy_read(bcm, 0x0802) | (0x0001 | 0x0002));
		bcm43xx_set_original_gains(bcm);
		bcm43xx_phy_write(bcm, 0x0802,
				  bcm43xx_phy_read(bcm, 0x0802) | 0x8000);
		bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
				  bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) | 0x8000);
		if (phy->rev >= 3) {
			bcm43xx_phy_write(bcm, 0x0801, backup[14]);
			bcm43xx_phy_write(bcm, 0x0060, backup[15]);