Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 975cd350 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 's390-qeth-updates'



Julian Wiedmann says:

====================
s390/qeth: updates 2018-07-19

please apply one more round of qeth patches to net-next.
This brings additional performance improvements for the transmit code,
and some refactoring to pave the way for using netdev_priv.
Also, two minor fixes for rare corner cases.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents d528114b 5f89eca5
Loading
Loading
Loading
Loading
+5 −6
Original line number Diff line number Diff line
@@ -104,6 +104,7 @@ struct qeth_dbf_info {
struct qeth_perf_stats {
	unsigned int bufs_rec;
	unsigned int bufs_sent;
	unsigned int buf_elements_sent;

	unsigned int skbs_sent_pack;
	unsigned int bufs_sent_pack;
@@ -137,7 +138,6 @@ struct qeth_perf_stats {
	unsigned int large_send_bytes;
	unsigned int large_send_cnt;
	unsigned int sg_skbs_sent;
	unsigned int sg_frags_sent;
	/* initial values when measuring starts */
	unsigned long initial_rx_packets;
	unsigned long initial_tx_packets;
@@ -658,11 +658,8 @@ struct qeth_card_info {
	char mcl_level[QETH_MCL_LENGTH + 1];
	int guestlan;
	int mac_bits;
	int portno;
	enum qeth_card_types type;
	enum qeth_link_types link_type;
	int initial_mtu;
	int max_mtu;
	int broadcast_capable;
	int unique_id;
	bool layer_enforced;
@@ -966,6 +963,7 @@ extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];

struct net_device *qeth_clone_netdev(struct net_device *orig);
void qeth_set_recovery_task(struct qeth_card *);
void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
@@ -995,7 +993,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
struct net_device_stats *qeth_get_stats(struct net_device *);
int qeth_change_mtu(struct net_device *, int);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
@@ -1050,7 +1047,9 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
		       struct qeth_hdr **hdr, unsigned int hdr_len,
		       unsigned int proto_len, unsigned int *elements);

/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
+183 −100
Original line number Diff line number Diff line
@@ -653,7 +653,6 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						cmd->hdr.return_code, card);
				}
				card->lan_online = 0;
				if (card->dev && netif_carrier_ok(card->dev))
				netif_carrier_off(card->dev);
				return NULL;
			case IPA_CMD_STARTLAN:
@@ -1921,7 +1920,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	tmp = ((__u8)card->info.portno) | 0x80;
	tmp = ((u8)card->dev->dev_port) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
@@ -2279,19 +2278,42 @@ static int qeth_cm_setup(struct qeth_card *card)

}

static int qeth_get_initial_mtu_for_card(struct qeth_card *card)
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		return card->info.max_mtu;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSX:
		if (!card->options.layer2)
			return ETH_DATA_LEN - 8; /* L3: allow for LLC + SNAP */
		/* fall through */
	default:
		return ETH_DATA_LEN;
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_buffers(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (card->options.layer2)
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

static int qeth_get_mtu_outof_framesize(int framesize)
@@ -2310,21 +2332,6 @@ static int qeth_get_mtu_outof_framesize(int framesize)
	}
}

/* Check whether @mtu is acceptable for this card type.
 * OSN (and any unknown type) accepts every MTU; the regular card types
 * enforce a [576, card->info.max_mtu] range.
 */
static int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSN:
		return 1;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
	case QETH_CARD_TYPE_IQD:
		return mtu >= 576 && mtu <= card->info.max_mtu;
	default:
		return 1;
	}
}

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
@@ -2343,29 +2350,10 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
		if (!mtu) {
			iob->rc = -EINVAL;
			QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
			return 0;
		}
		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
			/* frame size has changed */
			if (card->dev &&
			    ((card->dev->mtu == card->info.initial_mtu) ||
			     (card->dev->mtu > mtu)))
				card->dev->mtu = mtu;
			qeth_free_qdio_buffers(card);
		}
		card->info.initial_mtu = mtu;
		card->info.max_mtu = mtu;
		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
	} else {
		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
			iob->data);
		card->info.initial_mtu = min(card->info.max_mtu,
					qeth_get_initial_mtu_for_card(card));
		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
@@ -2384,6 +2372,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
	int rc;
	char prot_type;
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;

	/*FIXME: trace view callbacks*/
	QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
@@ -2391,8 +2380,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
		(__u8) card->info.portno;
	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	if (card->options.layer2)
		if (card->info.type == QETH_CARD_TYPE_OSN)
			prot_type = QETH_PROT_OSN2;
@@ -2407,9 +2395,10 @@ static int qeth_ulp_enable(struct qeth_card *card)
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
				    qeth_ulp_enable_cb, NULL);
				    qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;

	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -2920,7 +2909,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
	/* cmd->hdr.seqno is set by qeth_send_control_data() */
	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
	cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
	if (card->options.layer2)
		cmd->hdr.prim_version_no = 2;
	else
@@ -3506,13 +3495,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	atomic_add(count, &queue->used_buffers);

	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	atomic_add(count, &queue->used_buffers);
	if (rc) {
		queue->card->stats.tx_errors += count;
		/* ignore temporary SIGA errors without busy condition */
@@ -3577,7 +3567,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	if (card->dev && (card->dev->flags & IFF_UP))
	if (card->dev->flags & IFF_UP)
		napi_schedule(&card->napi);
}

@@ -3841,6 +3831,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);

static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}

/**
 * qeth_get_elements_no() -	find number of SBALEs for skb data, inc. frags.
 * @card:			qeth card structure, to check max. elems.
@@ -3856,12 +3857,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
int qeth_get_elements_no(struct qeth_card *card,
		     struct sk_buff *skb, int extra_elems, int data_offset)
{
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	int elements = qeth_get_elements_for_frags(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	int elements = qeth_count_elements(skb, data_offset);

	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
@@ -3895,32 +3891,87 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);

/**
 * qeth_push_hdr() - push a qeth_hdr onto an skb.
 * @skb: skb that the qeth_hdr should be pushed onto.
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @len: length of the hdr that needs to be pushed on.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * Error to create the hdr is indicated by returning with < 0.
 */
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
{
	if (skb_headroom(skb) >= len &&
	    qeth_get_elements_for_range((addr_t)skb->data - len,
					(addr_t)skb->data) == 1) {
		*hdr = skb_push(skb, len);
		return len;
/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @card: card to take the per-buffer element limit from.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr; on success it points to the header.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in the same page
 *	       as the HW header.
 * @elements: on success, set to the number of buffer elements the skb
 *	      (including the header) will occupy.
 *
 * Return: @hdr_len when the header was pushed onto the skb, 0 when it was
 *	   allocated from the header cache instead (caller must free it on
 *	   error paths), or a negative errno on failure (-E2BIG / -ENOMEM /
 *	   skb_linearize() result).
 */
int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
		       struct qeth_hdr **hdr, unsigned int hdr_len,
		       unsigned int proto_len, unsigned int *elements)
{
	const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	/* how many bytes after the header must share its element: */
	const unsigned int contiguous = proto_len ? proto_len : 1;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
		/* Push HW header into a new page. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		/* count attempts, successful or not, before bailing out: */
		if (card->options.performance_stats) {
			if (rc)
				card->perf_stats.tx_linfail++;
			else
				card->perf_stats.tx_lin++;
		}
		if (rc)
			return rc;

		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}
	/* fall back */
	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_add_hw_header);

static void __qeth_fill_buffer(struct sk_buff *skb,
			       struct qeth_qdio_out_buffer *buf,
@@ -4200,24 +4251,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

/* ndo_change_mtu handler: log the request, validate it against the card's
 * limits and apply it to the netdevice.
 */
int qeth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct qeth_card *card = dev->ml_priv;
	char dbf_text[15];

	QETH_CARD_TEXT(card, 4, "chgmtu");
	sprintf(dbf_text, "%8x", new_mtu);
	QETH_CARD_TEXT(card, 4, dbf_text);

	if (!qeth_mtu_is_valid(card, new_mtu))
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_change_mtu);

struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
	struct qeth_card *card;
@@ -4793,9 +4826,6 @@ int qeth_vm_request_mac(struct qeth_card *card)

	QETH_DBF_TEXT(SETUP, 2, "vmreqmac");

	if (!card->dev)
		return -ENODEV;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
@@ -5675,6 +5705,53 @@ static void qeth_clear_dbf_list(void)
	mutex_unlock(&qeth_dbf_list_mutex);
}

/* Allocate and pre-configure the netdevice for @card: IQD and OSN get a
 * bare netdev with their own name templates, everything else a standard
 * etherdev. MTU fields stay 0 until the device first goes online.
 */
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;

	if (IS_IQD(card))
		dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
	else if (IS_OSN(card))
		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
	else
		dev = alloc_etherdev(0);

	if (!dev)
		return NULL;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	if (!IS_OSN(card)) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
	}

	return dev;
}

/* Build a fresh, pristine netdevice of the same kind as @orig,
 * carrying over only the relative port number.
 */
struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone;

	clone = qeth_alloc_netdev(orig->ml_priv);
	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
@@ -5724,6 +5801,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
		goto err_card;
	}

	card->dev = qeth_alloc_netdev(card);
	if (!card->dev)
		goto err_card;

	qeth_determine_capabilities(card);
	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
@@ -5734,7 +5815,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_card;
			goto err_load;

		gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
					? card->discipline->devtype
@@ -5752,6 +5833,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)

err_disc:
	qeth_core_free_discipline(card);
err_load:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
@@ -5774,10 +5857,10 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	free_netdev(card->dev);
	qeth_core_free_card(card);
	dev_set_drvdata(&gdev->dev, NULL);
	put_device(&gdev->dev);
	return;
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
@@ -5955,7 +6038,7 @@ static struct {
	{"tx skbs packing"},
	{"tx buffers packing"},
	{"tx sg skbs"},
	{"tx sg frags"},
	{"tx buffer elements"},
/* 10 */{"rx sg skbs"},
	{"rx sg frags"},
	{"rx sg page allocs"},
@@ -6014,7 +6097,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
	data[6] = card->perf_stats.skbs_sent_pack;
	data[7] = card->perf_stats.bufs_sent_pack;
	data[8] = card->perf_stats.sg_skbs_sent;
	data[9] = card->perf_stats.sg_frags_sent;
	data[9] = card->perf_stats.buf_elements_sent;
	data[10] = card->perf_stats.sg_skbs_rx;
	data[11] = card->perf_stats.sg_frags_rx;
	data[12] = card->perf_stats.sg_alloc_page_rx;
+1 −0
Original line number Diff line number Diff line
@@ -65,6 +65,7 @@ enum qeth_card_types {
};

#define IS_IQD(card)	((card)->info.type == QETH_CARD_TYPE_IQD)
#define IS_OSN(card)	((card)->info.type == QETH_CARD_TYPE_OSN)

#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
+14 −4
Original line number Diff line number Diff line
@@ -112,7 +112,7 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->info.portno);
	return sprintf(buf, "%i\n", card->dev->dev_port);
}

static ssize_t qeth_dev_portno_store(struct device *dev,
@@ -143,8 +143,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
		rc = -EINVAL;
		goto out;
	}
	card->info.portno = portno;
	if (card->dev)
	card->dev->dev_port = portno;
out:
	mutex_unlock(&card->conf_mutex);
@@ -388,6 +386,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	struct net_device *ndev;
	char *tmp;
	int i, rc = 0;
	enum qeth_discipline_id newdis;
@@ -424,8 +423,19 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,

	card->info.mac_bits = 0;
	if (card->discipline) {
		/* start with a new, pristine netdevice: */
		ndev = qeth_clone_netdev(card->dev);
		if (!ndev) {
			rc = -ENOMEM;
			goto out;
		}

		card->discipline->remove(card->gdev);
		qeth_core_free_discipline(card);
		card->options.layer2 = -1;

		free_netdev(card->dev);
		card->dev = ndev;
	}

	rc = qeth_core_load_discipline(card, newdis);
+48 −128
Original line number Diff line number Diff line
@@ -641,97 +641,58 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
		qeth_promisc_to_bridge(card);
}

static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
			    struct qeth_qdio_out_q *queue, int cast_type)
static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
			struct qeth_qdio_out_q *queue, int cast_type, int ipv)
{
	unsigned int data_offset = ETH_HLEN;
	struct qeth_hdr *hdr;
	int rc;

	hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
	if (!hdr)
		return -ENOMEM;
	qeth_l2_fill_header(hdr, skb, cast_type, skb->len);
	skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
				  data_offset);

	if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
		rc = -E2BIG;
		goto out;
	}
	rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
				      sizeof(*hdr) + data_offset);
out:
	if (rc)
		kmem_cache_free(qeth_core_header_cache, hdr);
	return rc;
}

static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
			    struct qeth_qdio_out_q *queue, int cast_type,
			    int ipv)
{
	int push_len = sizeof(struct qeth_hdr);
	unsigned int elements, nr_frags;
	unsigned int hdr_elements = 0;
	const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
	const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
	unsigned int frame_len = skb->len;
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	int rc;

	/* fix hardware limitation: as long as we do not have sbal
	 * chaining we can not send long frag lists
	 */
	if (!qeth_get_elements_no(card, skb, 0, 0)) {
		rc = skb_linearize(skb);
	unsigned int elements;
	int push_len, rc;
	bool is_sg;

		if (card->options.performance_stats) {
			if (rc)
				card->perf_stats.tx_linfail++;
			else
				card->perf_stats.tx_lin++;
		}
	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;
	}
	nr_frags = skb_shinfo(skb)->nr_frags;

	rc = skb_cow_head(skb, push_len);
	if (rc)
		return rc;
	push_len = qeth_push_hdr(skb, &hdr, push_len);
	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (!push_len) {
		/* hdr was allocated from cache */
		hd_len = sizeof(*hdr);
		hdr_elements = 1;
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = proto_len;
	}
	qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
	qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
		if (card->options.performance_stats)
			card->perf_stats.tx_csum++;
	}

	elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
	if (!elements) {
		rc = -E2BIG;
		goto out;
	is_sg = skb_is_nonlinear(skb);
	if (IS_IQD(card)) {
		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
					      hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
	}
	elements += hdr_elements;

	/* TODO: remove the skb_orphan() once TX completion is fast enough */
	skb_orphan(skb);
	rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
out:
	if (!rc) {
		if (card->options.performance_stats && nr_frags) {
		if (card->options.performance_stats) {
			card->perf_stats.buf_elements_sent += elements;
			if (is_sg)
				card->perf_stats.sg_skbs_sent++;
			/* nr_frags + skb->data */
			card->perf_stats.sg_frags_sent += nr_frags + 1;
		}
	} else {
		if (hd_len)
		if (!push_len)
			kmem_cache_free(qeth_core_header_cache, hdr);
		if (rc == -EBUSY)
			/* roll back to ETH header */
@@ -781,16 +742,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
	}
	netif_stop_queue(dev);

	switch (card->info.type) {
	case QETH_CARD_TYPE_OSN:
	if (IS_OSN(card))
		rc = qeth_l2_xmit_osn(card, skb, queue);
		break;
	case QETH_CARD_TYPE_IQD:
		rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
		break;
	default:
		rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv);
	}
	else
		rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);

	if (!rc) {
		card->stats.tx_packets++;
@@ -899,13 +854,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)

	if (cgdev->state == CCWGROUP_ONLINE)
		qeth_l2_set_offline(cgdev);

	if (card->dev) {
	unregister_netdev(card->dev);
		free_netdev(card->dev);
		card->dev = NULL;
	}
	return;
}

static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -934,7 +883,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
	.ndo_set_rx_mode	= qeth_l2_set_rx_mode,
	.ndo_do_ioctl		= qeth_do_ioctl,
	.ndo_set_mac_address    = qeth_l2_set_mac_address,
	.ndo_change_mtu	   	= qeth_change_mtu,
	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = qeth_l2_vlan_rx_kill_vid,
	.ndo_tx_timeout	   	= qeth_tx_timeout,
@@ -944,35 +892,19 @@ static const struct net_device_ops qeth_l2_netdev_ops = {

static int qeth_l2_setup_netdev(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
					 ether_setup);
		break;
	case QETH_CARD_TYPE_OSN:
		card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
					 ether_setup);
		break;
	default:
		card->dev = alloc_etherdev(0);
	}
	int rc;

	if (!card->dev)
		return -ENODEV;
	if (card->dev->netdev_ops)
		return 0;

	card->dev->ml_priv = card;
	card->dev->priv_flags |= IFF_UNICAST_FLT;
	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
	card->dev->mtu = card->info.initial_mtu;
	card->dev->min_mtu = 64;
	card->dev->max_mtu = ETH_MAX_MTU;
	card->dev->dev_port = card->info.portno;
	card->dev->netdev_ops = &qeth_l2_netdev_ops;
	if (card->info.type == QETH_CARD_TYPE_OSN) {
		card->dev->ethtool_ops = &qeth_l2_osn_ops;
		card->dev->flags |= IFF_NOARP;
	} else {
		card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
		card->dev->needed_headroom = sizeof(struct qeth_hdr);
	}

	if (card->info.type == QETH_CARD_TYPE_OSM)
@@ -980,14 +912,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
	else
		card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (card->info.type != QETH_CARD_TYPE_OSN &&
	    card->info.type != QETH_CARD_TYPE_IQD) {
		card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		card->dev->needed_headroom = sizeof(struct qeth_hdr);
		card->dev->hw_features |= NETIF_F_SG;
		card->dev->vlan_features |= NETIF_F_SG;
	}

	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
		card->dev->features |= NETIF_F_SG;
		/* OSA 3S and earlier has no RX/TX support */
@@ -1006,12 +930,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
		card->dev->vlan_features |= NETIF_F_RXCSUM;
	}

	card->info.broadcast_capable = 1;
	qeth_l2_request_initial_mac(card);
	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
	netif_carrier_off(card->dev);
	return register_netdev(card->dev);
	rc = register_netdev(card->dev);
	if (rc)
		card->dev->netdev_ops = NULL;
	return rc;
}

static int qeth_l2_start_ipassists(struct qeth_card *card)
@@ -1057,10 +981,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
		dev_info(&card->gdev->dev,
		"The device represents a Bridge Capable Port\n");

	if (!card->dev && qeth_l2_setup_netdev(card)) {
		rc = -ENODEV;
	rc = qeth_l2_setup_netdev(card);
	if (rc)
		goto out_remove;
	}

	if (card->info.type != QETH_CARD_TYPE_OSN &&
	    !qeth_l2_send_setmac(card, card->dev->dev_addr))
@@ -1163,7 +1086,6 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
	QETH_DBF_TEXT(SETUP, 3, "setoffl");
	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));

	if (card->dev && netif_carrier_ok(card->dev))
	netif_carrier_off(card->dev);
	recover_flag = card->state;
	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
@@ -1237,7 +1159,6 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	if (card->dev)
	netif_device_detach(card->dev);
	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -1271,7 +1192,6 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
		rc = __qeth_l2_set_online(card->gdev, 0);
out:
	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (card->dev)
	netif_device_attach(card->dev);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver "
Loading