Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 36e319bd authored by Roland Vossen, committed by Greg Kroah-Hartman
Browse files

staging: brcm80211: removed more unused dma32 code



Since the two preprocessor defines (DMA64_ENAB and DMA64_MODE) always
evaluate to '1', the code branches that were never compiled in could be
removed, along with all references to these preprocessor defines.

Signed-off-by: Roland Vossen <rvossen@broadcom.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 2fd31011
Loading
Loading
Loading
Loading
+162 −232
Original line number Diff line number Diff line
@@ -151,9 +151,6 @@ typedef struct dma_info {
	bool aligndesc_4k;	/* descriptor base need to be aligned or not */
} dma_info_t;

#define	DMA64_ENAB(di)		1
#define	DMA64_MODE(di)		1

/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
@@ -380,11 +377,7 @@ struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
	/* old chips w/o sb is no longer supported */
	ASSERT(sih != NULL);

	if (DMA64_ENAB(di))
		di->dma64 =
		    ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
	else
		di->dma64 = 0;
	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
@@ -396,17 +389,11 @@ struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	ASSERT(ntxd <= D64MAXDD);
	ASSERT(nrxd <= D64MAXDD);
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	} else {
		DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
		ASSERT(0);
		goto fail;
	}

	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
@@ -416,7 +403,11 @@ struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64", osh,
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
@@ -450,15 +441,9 @@ struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SI_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
@@ -478,14 +463,11 @@ struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
	/* does the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		if (DMA64_MODE(di)) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
				/* for smaller dd table, HW relax the alignment requirement */
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
		} else
			di->dmadesc_align = D32RINGALIGN_BITS;
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

@@ -659,10 +641,7 @@ static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs)

static bool _dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	return dma64_alloc(di, direction);
	} else
		ASSERT(0);
}

/* !! may be called with core in reset */
@@ -676,7 +655,6 @@ static void _dma_detach(dma_info_t *di)
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	if (di->txd64)
		DMA_FREE_CONSISTENT(di->osh,
				    ((s8 *)di->txd64 -
@@ -687,8 +665,6 @@ static void _dma_detach(dma_info_t *di)
				    ((s8 *)di->rxd64 -
				    di->rxdalign), di->rxdalloc,
				    (di->rxdpaorig), &di->rx_dmah);
	} else
		ASSERT(0);

	/* free packet pointer vectors */
	if (di->txp)
@@ -711,7 +687,6 @@ static void _dma_detach(dma_info_t *di)

static bool _dma_descriptor_align(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
@@ -726,41 +701,36 @@ static bool _dma_descriptor_align(dma_info_t *di)
		if (addrl != 0)
			return false;
	}
	}
	return true;
}

/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channel are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	}
	return false;
	} else
		ASSERT(0);

	return false;
}

/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = PHYSADDRLO(pa);
@@ -808,18 +778,13 @@ static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
	} else
		ASSERT(0);
}

static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di))
	OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else
		ASSERT(0);
}

static void _dma_rxinit(dma_info_t *di)
@@ -832,7 +797,6 @@ static void _dma_rxinit(dma_info_t *di)
	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	memset((void *)di->rxd64, '\0',
		(di->nrxd * sizeof(dma64dd_t)));

@@ -846,18 +810,16 @@ static void _dma_rxinit(dma_info_t *di)

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else
		ASSERT(0);
}

static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;
	u32 control;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		u32 control =
	control =
	    (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

@@ -869,8 +831,6 @@ static void _dma_rxenable(dma_info_t *di)

	W_REG(di->osh, &di->d64rxregs->control,
		((di->rxoffset << D64_RC_RO_SHIFT) | control));
	} else
		ASSERT(0);
}

static void
@@ -936,14 +896,11 @@ static void *BCMFASTPATH _dma_rx(dma_info_t *di)
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
			cur =
			    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t) : B2I(R_REG(di->osh,
						       &di->d32rxregs->
						       status) & RS_CD_MASK,
						 dma32dd_t);
				dma64dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
@@ -1005,15 +962,11 @@ static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0) {
				if (DMA64_ENAB(di) && DMA64_MODE(di)) {
					if (dma64_rxidle(di)) {
						DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR(("%s: rxfill64: ring is empty !\n",
					   di->name));
				ring_empty = true;
			}
				} else
					ASSERT(0);
			}
			di->hnddma.rxnobuf++;
			break;
		}
@@ -1041,25 +994,19 @@ static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		} else
			ASSERT(0);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	W_REG(di->osh, &di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else
		ASSERT(0);

	return ring_empty;
}
@@ -1072,13 +1019,10 @@ static void *_dma_peeknexttxp(dma_info_t *di)
	if (di->ntxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	end =
	    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		  dma64dd_t);
	} else
		ASSERT(0);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
@@ -1095,13 +1039,10 @@ static void *_dma_peeknextrxp(dma_info_t *di)
	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	end =
	    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
		  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
		  dma64dd_t);
	} else
		ASSERT(0);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
@@ -1125,10 +1066,7 @@ static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	return dma64_getnextrxp(di, forceall);
	} else
		ASSERT(0);
}

static void _dma_txblock(dma_info_t *di)
@@ -1150,13 +1088,10 @@ static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	curr =
	    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		  dma64dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(curr, di->txout);
}
@@ -1169,10 +1104,7 @@ static uint _dma_txcommitted(dma_info_t *di)
	if (txin == di->txout)
		return 0;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
	ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(di->txin, ptr);
}
@@ -1208,7 +1140,6 @@ static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		control = R_REG(di->osh, &di->d64txregs->control);
		W_REG(di->osh, &di->d64txregs->control,
		      control | D64_XC_PD);
@@ -1222,8 +1153,6 @@ static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
		}
		} else
			ASSERT(0);
	}

	di->hnddma.dmactrlflags = dmactrlflags;
@@ -2561,3 +2490,4 @@ uint dma_addrwidth(si_t *sih, void *dmaregs)
	/* Fallthru */
	return DMADDRWIDTH_30;
}