
Commit e8e9f696 authored by Joe Perches, committed by David S. Miller

drivers/net/ixgbe/ixgbe_main.c: Checkpatch cleanups



Whitespace cleanups.
Move the inline keyword to sit between the storage class and the return type (static bool inline -> static inline bool).
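
As an illustration of the keyword reordering, here is a minimal before/after sketch using one of the prototypes changed in the hunks below (declaration only, body unchanged):

	/* before: "inline" placed after the return type, which checkpatch flags */
	static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter);

	/* after: storage class, then inline, then the return type */
	static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter);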

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 933d41f1
+236 −231
@@ -131,8 +131,8 @@ static struct notifier_block dca_notifier = {
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,7 +169,7 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)

	/* take a breather then clean up driver data */
	msleep(100);
-	if (adapter->vfinfo)
+
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

@@ -1281,7 +1281,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
-				skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+				skb = ixgbe_transform_rsc_queue(skb,
+								&(rx_ring->rsc_count));
			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
				if (IXGBE_RSC_CB(skb)->delay_unmap) {
					dma_unmap_single(&pdev->dev,
@@ -1292,7 +1293,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
					IXGBE_RSC_CB(skb)->delay_unmap = false;
				}
				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-					rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+					rx_ring->rsc_count +=
+						skb_shinfo(skb)->nr_frags;
				else
					rx_ring->rsc_count++;
				rx_ring->rsc_flush++;
@@ -2020,7 +2022,8 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
		if (adapter->tx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+			ixgbe_irq_enable_queues(adapter,
+						((u64)1 << q_vector->v_idx));
	}

	return work_done;
@@ -2142,12 +2145,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
-		}
-		else if(handler == &ixgbe_msix_clean_tx) {
+		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
-		}
-		else
+		} else
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);

@@ -2931,8 +2932,7 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;

#ifdef IXGBE_FCOE
-		if (netdev->features & NETIF_F_FCOE_MTU)
-		{
+		if (netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((i >= f->mask) && (i < f->mask + f->indices)) {
@@ -3464,7 +3464,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
		goto link_cfg_out;

	if (hw->mac.ops.get_link_capabilities)
-		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+							&negotiation);
	if (ret)
		goto link_cfg_out;

@@ -3995,7 +3996,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 * Rx load across CPUs using RSS.
 *
 **/
-static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
@@ -4289,7 +4290,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
 *
 **/
-static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;
@@ -5462,10 +5463,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 tmp;
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
+		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
+						/* 4 high bits of GORC */
		adapter->stats.gorc += (tmp << 32);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
+		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
+						/* 4 high bits of GOTC */
		adapter->stats.gotc += (tmp << 32);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH);	/* to clear */
@@ -6888,11 +6891,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
-	        ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
-	         (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
-	        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
-	         (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
-	         (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
-	         "Unknown"),
+		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
+		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
+		    "Unknown"),
+		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+		    "Unknown"),
		   netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
@@ -6941,7 +6945,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
-		INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
+		INIT_WORK(&adapter->check_overtemp_task,
+			  ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -6977,8 +6982,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-	                             IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);