Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bc474751 authored by David S. Miller
Browse files

Merge branch 'ibmvnic-Tunable-parameter-support'



John Allen says:

====================
ibmvnic: Tunable parameter support

This series implements support for changing tunable parameters such as the
mtu, number of tx/rx queues, and number of buffers per queue via ethtool
and ifconfig.

v2: -Fix conflict with Tom's recently applied TSO/SG patches
v3: -Initialize rc in __ibmvnic_reset fixing build warning
    -Fix buggy behavior with pending mac changes. Use boolean flag
     to track if mac change is needed on open rather than relying on
     the desired->mac pointer.
    -Directly include tunable structs in the adapter struct rather
     than keeping pointers, eliminating the need to directly allocate
     them.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents c9d0dc4b 2a1bf511
Loading
Loading
Loading
Loading
+188 −29
Original line number Diff line number Diff line
@@ -115,6 +115,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
@@ -926,6 +927,11 @@ static int ibmvnic_open(struct net_device *netdev)

	mutex_lock(&adapter->reset_lock);

	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
@@ -1426,7 +1432,7 @@ static void ibmvnic_set_multi(struct net_device *netdev)
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
@@ -1444,6 +1450,22 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
	return 0;
}

/* ndo_set_mac_address handler.
 *
 * If the adapter is not fully open, the new address cannot be pushed to
 * the hardware yet, so stash it in adapter->desired.mac and set
 * mac_change_pending; ibmvnic_open() applies it later.  Otherwise
 * program the address immediately via __ibmvnic_set_mac().
 *
 * Returns 0 on success (or when the change is deferred), otherwise the
 * error code from __ibmvnic_set_mac().
 */
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (adapter->state != VNIC_OPEN) {
		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
		adapter->mac_change_pending = true;
		return 0;
	}

	/* Propagate failures from the low-level helper instead of
	 * unconditionally reporting success to the caller.
	 */
	return __ibmvnic_set_mac(netdev, addr);
}

/**
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
@@ -1470,6 +1492,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
	if (rc)
		return rc;

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
	    adapter->wait_for_reset) {
		release_resources(adapter);
		release_sub_crqs(adapter);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
@@ -1478,7 +1507,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,

		rc = ibmvnic_init(adapter);
		if (rc)
			return 0;
			return IBMVNIC_INIT_FAILED;

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
@@ -1492,6 +1521,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
			return 0;
		}

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
		    adapter->wait_for_reset) {
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
				return rc;
@@ -1503,6 +1538,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
			if (reset_state == VNIC_CLOSED)
				return 0;
		}
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
@@ -1561,7 +1597,7 @@ static void __ibmvnic_reset(struct work_struct *work)
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;
@@ -1574,12 +1610,18 @@ static void __ibmvnic_reset(struct work_struct *work)
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc)
		if (rc && rc != IBMVNIC_INIT_FAILED)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (adapter->wait_for_reset) {
		adapter->wait_for_reset = false;
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
@@ -1759,9 +1801,42 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
}
#endif

/* Request a VNIC_RESET_CHANGE_PARAM reset and block until the reset
 * worker signals completion.  If the reset with the desired parameters
 * fails, restore the saved fallback values and reset once more so the
 * adapter comes back up with its previous configuration.
 *
 * Returns the reset worker's result code (0 on success).
 */
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	/* Snapshot the current settings so we can revert if the
	 * requested parameter change fails.
	 */
	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	init_completion(&adapter->reset_done);
	/* wait_for_reset must be set before scheduling the reset: the
	 * reset worker only completes reset_done when it sees the flag,
	 * so setting it afterwards races with the worker and can leave
	 * us blocked on the completion forever.
	 */
	adapter->wait_for_reset = true;
	ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
	wait_for_completion(&adapter->reset_done);

	if (adapter->reset_done_rc) {
		/* Requested change failed - fall back to the previous
		 * values and reset again to restore them.
		 */
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		init_completion(&adapter->reset_done);
		/* The worker cleared wait_for_reset when it completed the
		 * first reset; it must be re-armed or the second
		 * wait_for_completion() below would never be signalled.
		 */
		adapter->wait_for_reset = true;
		ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		wait_for_completion(&adapter->reset_done);
	}
	adapter->wait_for_reset = false;

	return adapter->reset_done_rc;
}

/* ndo_change_mtu handler.  Record the requested MTU (plus the Ethernet
 * header length the firmware interface expects) as a desired tunable
 * and trigger a CHANGE_PARAM reset to apply it.
 *
 * Note: the old unconditional "return -EOPNOTSUPP;" stub was removed;
 * leaving it in place made the body below unreachable.
 */
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}

static const struct net_device_ops ibmvnic_netdev_ops = {
@@ -1849,6 +1924,27 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
	ring->rx_jumbo_pending = 0;
}

/* Ethtool set_ringparam handler: validate the requested per-queue
 * buffer counts against the partner-advertised maxima, record them as
 * desired tunables, and trigger a CHANGE_PARAM reset to apply them.
 *
 * Returns -EINVAL if either request exceeds the adapter maximum,
 * otherwise the result of wait_for_reset().
 */
static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		/* Bug fix: the tx/rx limits were printed swapped - the
		 * "tx" line reported the rx maximum and vice versa.
		 */
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		return -EINVAL;
	}

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
@@ -1864,6 +1960,17 @@ static void ibmvnic_get_channels(struct net_device *netdev,
	channels->combined_count = 0;
}

/* Ethtool set_channels handler: record the requested rx/tx queue
 * counts as desired tunables, then trigger a CHANGE_PARAM reset so
 * the adapter is re-initialized with the new queue configuration.
 */
static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *priv = netdev_priv(netdev);

	priv->desired.tx_queues = channels->tx_count;
	priv->desired.rx_queues = channels->rx_count;

	return wait_for_reset(priv);
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -1960,7 +2067,9 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings            = ibmvnic_get_strings,
	.get_sset_count         = ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
@@ -2426,6 +2535,7 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
@@ -2437,21 +2547,60 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;
					adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
@@ -3272,6 +3421,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;
@@ -3291,6 +3441,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
@@ -3846,7 +3998,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	if (adapter->resetting) {
	if (adapter->resetting && !adapter->wait_for_reset) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
@@ -3880,7 +4032,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
		return -1;
	}

	if (adapter->resetting)
	if (adapter->resetting && !adapter->wait_for_reset)
		rc = reset_sub_crq_queues(adapter);
	else
		rc = init_sub_crqs(adapter);
@@ -3949,6 +4101,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
@@ -3956,6 +4110,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
	} while (rc == EAGAIN);

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
@@ -3970,6 +4126,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
+25 −1
Original line number Diff line number Diff line
@@ -30,6 +30,8 @@
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
#define IBMVNIC_INVALID_MAP	-1
#define IBMVNIC_STATS_TIMEOUT	1
#define IBMVNIC_INIT_FAILED	2

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

@@ -42,6 +44,9 @@
#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64

#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
#define IBMVNIC_BUFFER_HLEN 500

struct ibmvnic_login_buffer {
	__be32 len;
	__be32 version;
@@ -945,13 +950,23 @@ enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT};
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM};

/* Reset work item: one entry is queued per pending adapter reset and
 * consumed by the __ibmvnic_reset work function (via get_next_rwi).
 */
struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;	/* why this reset was requested */
	struct list_head list;			/* linkage in the adapter's pending-reset list */
};

/* User-requested parameter set, used both for the "desired" values set
 * via ethtool/ifconfig and for the "fallback" snapshot taken before a
 * CHANGE_PARAM reset.  A zero field means "no specific request" - see
 * the checks in ibmvnic_send_req_caps().
 */
struct ibmvnic_tunables {
	u64 rx_queues;		/* requested number of rx queues */
	u64 tx_queues;		/* requested number of tx queues */
	u64 rx_entries;		/* requested rx buffers per sub-CRQ */
	u64 tx_entries;		/* requested tx buffers per sub-CRQ */
	u64 mtu;		/* requested MTU (includes ETH_HLEN) */
	struct sockaddr mac;	/* MAC to apply at open when mac_change_pending is set */
};

struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
@@ -1012,6 +1027,10 @@ struct ibmvnic_adapter {
	struct completion fw_done;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* partner capabilities */
	u64 min_tx_queues;
	u64 min_rx_queues;
@@ -1056,4 +1075,9 @@ struct ibmvnic_adapter {
	struct work_struct ibmvnic_reset;
	bool resetting;
	bool napi_enabled, from_passive_init;

	bool mac_change_pending;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};