Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 78aea4fc authored by Szymon Janc, committed by David S. Miller
Browse files

forcedeth: fix multiple code style issues

parent 47c05314
Loading
Loading
Loading
Loading
+135 −166
Original line number Diff line number Diff line
@@ -911,7 +911,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down = 0;
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
@@ -984,12 +984,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1174,9 +1172,8 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;
	}

	/* wait for 500ms */
	msleep(500);
@@ -1313,8 +1310,7 @@ static int phy_init(struct net_device *dev)
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1501,12 +1497,10 @@ static int phy_init(struct net_device *dev)
	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down) {
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	}
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;
	}

	return 0;
}
@@ -1767,10 +1761,9 @@ static int nv_alloc_rx(struct net_device *dev)
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
		} else
			return 1;
	}
	}
	return 0;
}

@@ -1800,10 +1793,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
		} else
			return 1;
	}
	}
	return 0;
}

@@ -2088,8 +2080,7 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
	{
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2491,10 +2482,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit) {
			if (np->tx_limit)
				nv_tx_flip_ownership(dev);
		}
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
@@ -2710,10 +2700,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
						if (flags & NV_RX_SUBSTRACT1)
							len--;
					}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
@@ -2745,10 +2734,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
						if (flags & NV_RX2_SUBSTRACT1)
							len--;
					}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
@@ -2838,10 +2826,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
					if (flags & NV_RX2_SUBSTRACT1)
						len--;
				}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
@@ -3325,21 +3312,16 @@ static int nv_update_linkspeed(struct net_device *dev)
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
				{
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP)
				{
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
				{
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				}
				break;
			}
		} else {
@@ -3776,18 +3758,16 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);
	}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
		if ((irqmask >> (i + 8)) & 0x1)
			msixmap |= vector << (i << 2);
	}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}

@@ -3809,9 +3789,8 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
@@ -3903,9 +3882,8 @@ static void nv_free_irq(struct net_device *dev)
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
@@ -4893,10 +4871,9 @@ static int nv_loopback_test(struct net_device *dev)
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
		if (flags & NV_RX2_ERROR)
			ret = 0;
	}
	}

	if (ret) {
		if (len != pkt_len) {
@@ -4958,11 +4935,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
@@ -5106,8 +5082,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
			np->mgmt_sema = 1;
			return 1;
		}
		else
		} else
			udelay(50);
	}

@@ -5251,8 +5226,7 @@ static int nv_open(struct net_device *dev)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	}
	else
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5276,9 +5250,8 @@ static int nv_open(struct net_device *dev)
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
	if (nv_request_irq(dev, 0))
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5663,16 +5636,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
	if (np->desc_ver == DESC_VER_1)
		np->tx_flags = NV_TX_VALID;
	} else {
	else
		np->tx_flags = NV_TX2_VALID;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
	if ((id->driver_data & DEV_HAS_MSI) && msi)
		np->msi_flags |= NV_MSI_CAPABLE;
	}

	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* msix has had reported issues when modifying irqmask
		   as in the case of napi, therefore, disable for now
@@ -5735,9 +5707,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0) {
			if (np->mgmt_version > 0)
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			}
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
				pci_name(pci_dev), np->mac_in_use);
			/* management unit setup the phy already? */
@@ -5799,10 +5770,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
		if (mii_status & PHY_GIGABIT)
			np->gigabit = PHY_GIGABIT;
	}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
@@ -5931,7 +5901,7 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
	int i;

	if (netif_running(dev)) {
		// Gross.
		/* Gross. */
		nv_close(dev);
	}
	netif_device_detach(dev);
@@ -5990,9 +5960,8 @@ static void nv_shutdown(struct pci_dev *pdev)
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF) {
	if (system_state != SYSTEM_POWER_OFF)
		nv_restore_mac_addr(pdev);
	}

	pci_disable_device(pdev);
	/*