drivers/net/skge.c +14 −14

@@ -130,7 +130,7 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		       regs->len - B3_RI_WTO_R1);
 }
 
-/* Wake on Lan only supported on Yukon chps with rev 1 or above */
+/* Wake on Lan only supported on Yukon chips with rev 1 or above */
 static int wol_supported(const struct skge_hw *hw)
 {
 	return !((hw->chip_id == CHIP_ID_GENESIS ||
@@ -170,8 +170,8 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
-/* Determine supported/adverised modes based on hardware.
- * Note: ethtoool ADVERTISED_xxx == SUPPORTED_xxx
+/* Determine supported/advertised modes based on hardware.
+ * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
  */
 static u32 skge_supported_modes(const struct skge_hw *hw)
 {
@@ -532,13 +532,13 @@ static inline u32 hwkhz(const struct skge_hw *hw)
 		return 78215; /* or: 78.125 MHz */
 }
 
-/* Chip hz to microseconds */
+/* Chip HZ to microseconds */
 static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
 {
 	return (ticks * 1000) / hwkhz(hw);
 }
 
-/* Microseconds to chip hz */
+/* Microseconds to chip HZ */
 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
 {
 	return hwkhz(hw) * usec / 1000;
@@ -1163,7 +1163,7 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo)
 	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
 	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
 
-	/* Use link status change interrrupt */
+	/* Use link status change interrupt */
 	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
 
 	bcom_check_link(hw, port);
@@ -1203,7 +1203,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 	skge_write32(hw, B2_GP_IO, r);
 	skge_read32(hw, B2_GP_IO);
 
-	/* Enable GMII interfac */
+	/* Enable GMII interface */
 	xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
 
 	bcom_phy_init(skge, jumbo);
@@ -1254,7 +1254,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 	 * that jumbo frames larger than 8192 bytes will be
 	 * truncated. Disabling all bad frame filtering causes
 	 * the RX FIFO to operate in streaming mode, in which
-	 * case the XMAC will start transfering frames out of the
+	 * case the XMAC will start transferring frames out of the
 	 * RX FIFO as soon as the FIFO threshold is reached.
 	 */
 	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
@@ -1321,7 +1321,7 @@ static void genesis_stop(struct skge_port *skge)
 		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
 
 	/*
-	 * If the transfer stucks at the MAC the STOP command will not
+	 * If the transfer sticks at the MAC the STOP command will not
 	 * terminate if we don't flush the XMAC's transmit FIFO !
 	 */
 	xm_write32(hw, port, XM_MODE,
@@ -1559,7 +1559,7 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
 	return v;
 }
 
-/* Marvell Phy Initailization */
+/* Marvell Phy Initialization */
 static void yukon_init(struct skge_hw *hw, int port)
 {
 	struct skge_port *skge = netdev_priv(hw->dev[port]);
@@ -2156,7 +2156,7 @@ static int skge_up(struct net_device *dev)
 	hw->intr_mask |= portirqmask[port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-	/* Initialze MAC */
+	/* Initialize MAC */
 	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_mac_init(hw, port);
@@ -2476,7 +2476,7 @@ static void yukon_set_multicast(struct net_device *dev)
 	reg = gma_read16(hw, port, GM_RX_CTRL);
 	reg |= GM_RXCR_UCF_ENA;
 
-	if (dev->flags & IFF_PROMISC) 	/* promiscious */
+	if (dev->flags & IFF_PROMISC) 	/* promiscuous */
 		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
 	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
 		memset(filter, 0xff, sizeof(filter));
@@ -2799,7 +2799,7 @@ static void skge_error_irq(struct skge_hw *hw)
 }
 
 /*
- * Interrrupt from PHY are handled in tasklet (soft irq)
+ * Interrupt from PHY are handled in tasklet (soft irq)
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
@@ -3233,7 +3233,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	}
 
 #ifdef __BIG_ENDIAN
-	/* byte swap decriptors in hardware */
+	/* byte swap descriptors in hardware */
 	{
 		u32 reg;
 