Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1f3466a0 authored by David S. Miller
Browse files

Merge branch 's390-net'



Ursula Braun says:

====================
s390/qeth patches for net

Here are two s390/qeth patches built for net, fixing a problem with AF_IUCV
traffic through HiperSockets.
And we come up with an update for the MAINTAINERS file to establish
Julian as Co-Maintainer for drivers/s390/net and net/iucv.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 031b8c6d 90b14dc7
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -10808,6 +10808,7 @@ F: drivers/s390/block/dasd*
F:	block/partitions/ibm.c

S390 NETWORK DRIVERS
M:	Julian Wiedmann <jwi@linux.vnet.ibm.com>
M:	Ursula Braun <ubraun@linux.vnet.ibm.com>
L:	linux-s390@vger.kernel.org
W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -10838,6 +10839,7 @@ S: Supported
F:	drivers/s390/scsi/zfcp_*

S390 IUCV NETWORK LAYER
M:	Julian Wiedmann <jwi@linux.vnet.ibm.com>
M:	Ursula Braun <ubraun@linux.vnet.ibm.com>
L:	linux-s390@vger.kernel.org
W:	http://www.ibm.com/developerworks/linux/linux390/
+2 −1
Original line number Diff line number Diff line
@@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
			 int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
			struct sk_buff *, struct qeth_hdr *, int, int, int);
+3 −2
Original line number Diff line number Diff line
@@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 * @card:			qeth card structure, to check max. elems.
 * @skb:			SKB address
 * @extra_elems:		extra elems needed, to check against max.
 * @data_offset:		range starts at skb->data + data_offset
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 * Note: extra_elems is not included in the returned result.
 */
int qeth_get_elements_no(struct qeth_card *card,
		     struct sk_buff *skb, int extra_elems)
		     struct sk_buff *skb, int extra_elems, int data_offset)
{
	int elements = qeth_get_elements_for_range(
				(addr_t)skb->data,
				(addr_t)skb->data + data_offset,
				(addr_t)skb->data + skb_headlen(skb)) +
			qeth_get_elements_for_frags(skb);

+3 −2
Original line number Diff line number Diff line
@@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	 * chaining we can not send long frag lists
	 */
	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
	    !qeth_get_elements_no(card, new_skb, 0)) {
	    !qeth_get_elements_no(card, new_skb, 0, 0)) {
		int lin_rc = skb_linearize(new_skb);

		if (card->options.performance_stats) {
@@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
		}
	}

	elements = qeth_get_elements_no(card, new_skb, elements_needed);
	elements = qeth_get_elements_no(card, new_skb, elements_needed,
					(data_offset > 0) ? data_offset : 0);
	if (!elements) {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);
+7 −13
Original line number Diff line number Diff line
@@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
	char daddr[16];
	struct af_iucv_trans_hdr *iucv_hdr;

	skb_pull(skb, 14);
	card->dev->header_ops->create(skb, card->dev, 0,
				      card->dev->dev_addr, card->dev->dev_addr,
				      card->dev->addr_len);
	skb_pull(skb, 14);
	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
	hdr->hdr.l3.ext_flags = 0;
	hdr->hdr.l3.length = skb->len;
	hdr->hdr.l3.length = skb->len - ETH_HLEN;
	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;

	iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
	memset(daddr, 0, sizeof(daddr));
	daddr[0] = 0xfe;
	daddr[1] = 0x80;
@@ -2823,9 +2819,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
	    !skb_is_nonlinear(skb)) {
		new_skb = skb;
		if (new_skb->protocol == ETH_P_AF_IUCV)
			data_offset = 0;
		else
		data_offset = ETH_HLEN;
		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
		if (!hdr)
@@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	 */
	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
	    ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
		int lin_rc = skb_linearize(new_skb);

		if (card->options.performance_stats) {
@@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)

	elements = use_tso ?
		   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
		   qeth_get_elements_no(card, new_skb, hdr_elements);
		   qeth_get_elements_no(card, new_skb, hdr_elements,
					(data_offset > 0) ? data_offset : 0);
	if (!elements) {
		if (data_offset >= 0)
			kmem_cache_free(qeth_core_header_cache, hdr);