Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit feb990d4 authored by Michał Mirosław, committed by David S. Miller
Browse files

net: vxge: convert to hw_features



Side effect: ->gro_enable is removed as napi_gro_receive() does the
fallback itself.

Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 30f554f9
Loading
Loading
Loading
Loading
+0 −72
Original line number Original line Diff line number Diff line
@@ -1071,35 +1071,6 @@ static int vxge_ethtool_get_regs_len(struct net_device *dev)
	return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
	return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
}
}


/* ethtool .get_rx_csum hook: report whether Rx checksum offload is on
 * for this device (driver-private flag, 1 or 0).
 */
static u32 vxge_get_rx_csum(struct net_device *dev)
{
	return ((struct vxgedev *)netdev_priv(dev))->rx_csum;
}

/* ethtool .set_rx_csum hook: latch the requested Rx checksum offload
 * state into the driver-private flag. Normalizes any non-zero request
 * to 1 since rx_csum is a single-bit field. Always succeeds.
 */
static int vxge_set_rx_csum(struct net_device *dev, u32 data)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vdev->rx_csum = data ? 1 : 0;

	return 0;
}

/* ethtool .set_tso hook: enable or disable both IPv4 and IPv6 TSO
 * feature bits together. Always succeeds.
 */
static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
{
	const u32 tso_bits = NETIF_F_TSO | NETIF_F_TSO6;

	if (data)
		dev->features |= tso_bits;
	else
		dev->features &= ~tso_bits;

	return 0;
}

static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
{
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxgedev *vdev = netdev_priv(dev);
@@ -1119,40 +1090,6 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
	}
	}
}
}


/* ethtool .set_flags hook: toggle receive hashing (RTH) via
 * ETH_FLAG_RXHASH. Only permitted while the interface is down and when
 * the device was configured with a steering mode; toggling requires a
 * full vpath reset. Returns 0 on success or no-op, -EINVAL on an
 * unsupported flag / wrong state, -EFAULT if the vpath reset fails.
 */
static int vxge_set_flags(struct net_device *dev, u32 data)
{
	struct vxgedev *vdev = netdev_priv(dev);
	enum vxge_hw_status status;

	/* Reject any flag other than ETH_FLAG_RXHASH. */
	if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
		return -EINVAL;

	/* Requested state already matches the hardware config: nothing to do. */
	if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
		return 0;

	/* RTH cannot be changed on a running interface, and requires a
	 * steering mode to have been selected at probe time.
	 */
	if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
		return -EINVAL;

	vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);

	/* Enabling RTH requires some of the logic in vxge_device_register and a
	 * vpath reset.  Due to these restrictions, only allow modification
	 * while the interface is down.
	 */
	status = vxge_reset_all_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		/* Reset failed: roll the config flag back to its old value. */
		vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
		return -EFAULT;
	}

	/* Mirror the final hardware state into the netdev feature bits. */
	if (vdev->devh->config.rth_en)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;

	return 0;
}

static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
{
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxgedev *vdev = netdev_priv(dev);
@@ -1181,19 +1118,10 @@ static const struct ethtool_ops vxge_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= vxge_ethtool_getpause_data,
	.get_pauseparam		= vxge_ethtool_getpause_data,
	.set_pauseparam		= vxge_ethtool_setpause_data,
	.set_pauseparam		= vxge_ethtool_setpause_data,
	.get_rx_csum		= vxge_get_rx_csum,
	.set_rx_csum		= vxge_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_ipv6_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= vxge_ethtool_op_set_tso,
	.get_strings		= vxge_ethtool_get_strings,
	.get_strings		= vxge_ethtool_get_strings,
	.set_phys_id		= vxge_ethtool_idnic,
	.set_phys_id		= vxge_ethtool_idnic,
	.get_sset_count		= vxge_ethtool_get_sset_count,
	.get_sset_count		= vxge_ethtool_get_sset_count,
	.get_ethtool_stats	= vxge_get_ethtool_stats,
	.get_ethtool_stats	= vxge_get_ethtool_stats,
	.set_flags		= vxge_set_flags,
	.flash_device		= vxge_fw_flash,
	.flash_device		= vxge_fw_flash,
};
};


+55 −45
Original line number Original line Diff line number Diff line
@@ -304,7 +304,6 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		"%s: %s:%d  skb protocol = %d",
		"%s: %s:%d  skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);
		ring->ndev->name, __func__, __LINE__, skb->protocol);


	if (ring->gro_enable) {
	if (ring->vlgrp && ext_info->vlan &&
	if (ring->vlgrp && ext_info->vlan &&
		(ring->vlan_tag_strip ==
		(ring->vlan_tag_strip ==
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
@@ -312,14 +311,7 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
				ext_info->vlan, skb);
				ext_info->vlan, skb);
	else
	else
		napi_gro_receive(ring->napi_p, skb);
		napi_gro_receive(ring->napi_p, skb);
	} else {

		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
}
@@ -490,7 +482,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,


		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2094,11 +2086,9 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
				vdev->config.fifo_indicate_max_pkts;
				vdev->config.fifo_indicate_max_pkts;
			vpath->fifo.tx_vector_no = 0;
			vpath->fifo.tx_vector_no = 0;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_csum = vdev->rx_csum;
			vpath->ring.rx_hwts = vdev->rx_hwts;
			vpath->ring.rx_hwts = vdev->rx_hwts;
			vpath->is_open = 1;
			vpath->is_open = 1;
			vdev->vp_handles[i] = vpath->handle;
			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.gro_enable = vdev->config.gro_enable;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
			vdev->stats.vpaths_open++;
		} else {
		} else {
@@ -2670,6 +2660,40 @@ static void vxge_poll_vp_lockup(unsigned long data)
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
}


/* ndo_fix_features hook: sanitize a requested feature set before the
 * core applies it. Reverts any attempted NETIF_F_RXHASH change while
 * the interface is up, since RTH reconfiguration needs a vpath reset.
 */
static u32 vxge_fix_features(struct net_device *dev, u32 features)
{
	u32 toggled = features ^ dev->features;

	/* Enabling RTH requires some of the logic in vxge_device_register and a
	 * vpath reset.  Due to these restrictions, only allow modification
	 * while the interface is down.
	 */
	if (netif_running(dev) && (toggled & NETIF_F_RXHASH))
		features ^= NETIF_F_RXHASH;

	return features;
}

/* ndo_set_features hook: apply a NETIF_F_RXHASH change by updating the
 * device's RTH config and resetting all vpaths. Relies on
 * vxge_fix_features() having already rejected the change on a running
 * interface. Returns 0 on success or when RXHASH did not change,
 * -EIO if the vpath reset fails (config and dev->features rolled back).
 */
static int vxge_set_features(struct net_device *dev, u32 features)
{
	struct vxgedev *vdev = netdev_priv(dev);
	u32 changed = dev->features ^ features;

	/* Only NETIF_F_RXHASH needs driver action; anything else is a no-op. */
	if (!(changed & NETIF_F_RXHASH))
		return 0;

	/* !netif_running() ensured by vxge_fix_features() */

	vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
	if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
		/* Reset failed: undo the RXHASH flip (core has already set
		 * dev->features to the new value) and re-derive rth_en from it.
		 */
		dev->features = features ^ NETIF_F_RXHASH;
		vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
		return -EIO;
	}

	return 0;
}

/**
/**
 * vxge_open
 * vxge_open
 * @dev: pointer to the device structure.
 * @dev: pointer to the device structure.
@@ -3369,6 +3393,8 @@ static const struct net_device_ops vxge_netdev_ops = {
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_fix_features	= vxge_fix_features,
	.ndo_set_features	= vxge_set_features,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid	= vxge_vlan_rx_add_vid,
	.ndo_vlan_rx_add_vid	= vxge_vlan_rx_add_vid,
@@ -3415,14 +3441,21 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
	vdev->devh = hldev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_csum = 1;	/* Enable Rx CSUM by default. */
	vdev->rx_hwts = 0;
	vdev->rx_hwts = 0;
	vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
	vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);


	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);


	ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
				NETIF_F_HW_VLAN_FILTER;
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_HW_VLAN_TX;
	if (vdev->config.rth_steering != NO_STEERING)
		ndev->hw_features |= NETIF_F_RXHASH;

	ndev->features |= ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/*  Driver entry points */
	/*  Driver entry points */
	ndev->irq = vdev->pdev->irq;
	ndev->irq = vdev->pdev->irq;
	ndev->base_addr = (unsigned long) hldev->bar0;
	ndev->base_addr = (unsigned long) hldev->bar0;
@@ -3434,11 +3467,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,


	vxge_initialize_ethtool_ops(ndev);
	vxge_initialize_ethtool_ops(ndev);


	if (vdev->config.rth_steering != NO_STEERING) {
		ndev->features |= NETIF_F_RXHASH;
		hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
	}

	/* Allocate memory for vpath */
	/* Allocate memory for vpath */
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
				no_of_vpath, GFP_KERNEL);
				no_of_vpath, GFP_KERNEL);
@@ -3450,9 +3478,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
		goto _out1;
		goto _out1;
	}
	}


	ndev->features |= NETIF_F_SG;

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksuming enabled", __func__);
		"%s : checksuming enabled", __func__);


@@ -3462,11 +3487,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
			"%s : using High DMA", __func__);
			"%s : using High DMA", __func__);
	}
	}


	ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (vdev->config.gro_enable)
		ndev->features |= NETIF_F_GRO;

	ret = register_netdev(ndev);
	ret = register_netdev(ndev);
	if (ret) {
	if (ret) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
@@ -3996,15 +4016,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
		vdev->config.tx_steering_type = 0;
		vdev->config.tx_steering_type = 0;
	}
	}


	if (vdev->config.gro_enable) {
		vxge_debug_init(VXGE_ERR,
			"%s: Generic receive offload enabled",
			vdev->ndev->name);
	} else
		vxge_debug_init(VXGE_TRACE,
			"%s: Generic receive offload disabled",
			vdev->ndev->name);

	if (vdev->config.addr_learn_en)
	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);
			"%s: MAC Address learning enabled", vdev->ndev->name);
@@ -4589,7 +4600,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
	/* set private device info */
	/* set private device info */
	pci_set_drvdata(pdev, hldev);
	pci_set_drvdata(pdev, hldev);


	ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
	ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
	ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
	ll_config->addr_learn_en = addr_learn_en;
	ll_config->addr_learn_en = addr_learn_en;
	ll_config->rth_algorithm = RTH_ALG_JENKINS;
	ll_config->rth_algorithm = RTH_ALG_JENKINS;
+4 −10
Original line number Original line Diff line number Diff line
@@ -168,9 +168,6 @@ struct vxge_config {


#define	NEW_NAPI_WEIGHT	64
#define	NEW_NAPI_WEIGHT	64
	int		napi_weight;
	int		napi_weight;
#define VXGE_GRO_DONOT_AGGREGATE		0
#define VXGE_GRO_ALWAYS_AGGREGATE		1
	int		gro_enable;
	int		intr_type;
	int		intr_type;
#define INTA	0
#define INTA	0
#define MSI	1
#define MSI	1
@@ -290,13 +287,11 @@ struct vxge_ring {
	unsigned long interrupt_count;
	unsigned long interrupt_count;
	unsigned long jiffies;
	unsigned long jiffies;


	/* copy of the flag indicating whether rx_csum is to be used */
	/* copy of the flag indicating whether rx_hwts is to be used */
	u32 rx_csum:1,
	u32 rx_hwts:1;
	    rx_hwts:1;


	int pkts_processed;
	int pkts_processed;
	int budget;
	int budget;
	int gro_enable;


	struct napi_struct napi;
	struct napi_struct napi;
	struct napi_struct *napi_p;
	struct napi_struct *napi_p;
@@ -369,9 +364,8 @@ struct vxgedev {
	 */
	 */
	u16		all_multi_flg;
	u16		all_multi_flg;


	 /* A flag indicating whether rx_csum is to be used or not. */
	/* A flag indicating whether rx_hwts is to be used or not. */
	u32	rx_csum:1,
	u32	rx_hwts:1,
		rx_hwts:1,
		titan1:1;
		titan1:1;


	struct vxge_msix_entry *vxge_entries;
	struct vxge_msix_entry *vxge_entries;