Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d401c1d1 authored by David S. Miller
Browse files

Merge tag 'batadv-next-for-davem-20161108-v2' of git://git.open-mesh.org/linux-merge



Simon Wunderlich says:

====================
pull request for net-next: batman-adv 2016-11-08 v2

This feature and cleanup patchset includes the following changes:

 - netlink and code cleanups by Sven Eckelmann (3 patches)

 - Cleanup and minor fixes by Linus Luessing (3 patches)

 - Speed up multicast update intervals, by Linus Luessing

 - Avoid (re)broadcast in meshes for some easy cases,
   by Linus Luessing

 - Clean up tx return state handling, by Sven Eckelmann (6 patches)

 - Fix some special mac address handling cases, by Sven Eckelmann
   (3 patches)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents a6dfdb4e 93bbaab4
Loading
Loading
Loading
Loading
+22 −11
Original line number Diff line number Diff line
@@ -698,7 +698,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,

	forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
	if (!forw_packet_aggr->skb) {
		batadv_forw_packet_free(forw_packet_aggr);
		batadv_forw_packet_free(forw_packet_aggr, true);
		return;
	}

@@ -1611,7 +1611,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);

	kfree_skb(skb_priv);
	consume_skb(skb_priv);
}

/**
@@ -1783,6 +1783,7 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;
	bool dropped = false;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
@@ -1792,8 +1793,10 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	batadv_iv_ogm_emit(forw_packet);

@@ -1810,7 +1813,7 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
		batadv_iv_ogm_schedule(forw_packet->if_incoming);

out:
	batadv_forw_packet_free(forw_packet);
	batadv_forw_packet_free(forw_packet, dropped);
}

static int batadv_iv_ogm_receive(struct sk_buff *skb,
@@ -1820,17 +1823,18 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
	struct batadv_ogm_packet *ogm_packet;
	u8 *packet_pos;
	int ogm_offset;
	bool ret;
	bool res;
	int ret = NET_RX_DROP;

	ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
	if (!ret)
		return NET_RX_DROP;
	res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
	if (!res)
		goto free_skb;

	/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
	 * that does not have B.A.T.M.A.N. IV enabled ?
	 */
	if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable)
		return NET_RX_DROP;
		goto free_skb;

	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
@@ -1851,8 +1855,15 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
		ogm_packet = (struct batadv_ogm_packet *)packet_pos;
	}

	ret = NET_RX_SUCCESS;

free_skb:
	if (ret == NET_RX_SUCCESS)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return NET_RX_SUCCESS;

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+18 −12
Original line number Diff line number Diff line
@@ -492,20 +492,21 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
	struct batadv_elp_packet *elp_packet;
	struct batadv_hard_iface *primary_if;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	bool ret;
	bool res;
	int ret = NET_RX_DROP;

	ret = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
	if (!ret)
		return NET_RX_DROP;
	res = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
	if (!res)
		goto free_skb;

	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
		return NET_RX_DROP;
		goto free_skb;

	/* did we receive a B.A.T.M.A.N. V ELP packet on an interface
	 * that does not have B.A.T.M.A.N. V ELP enabled ?
	 */
	if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
		return NET_RX_DROP;
		goto free_skb;

	elp_packet = (struct batadv_elp_packet *)skb->data;

@@ -516,14 +517,19 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
		goto free_skb;

	batadv_v_elp_neigh_update(bat_priv, ethhdr->h_source, if_incoming,
				  elp_packet);

out:
	if (primary_if)
	ret = NET_RX_SUCCESS;
	batadv_hardif_put(primary_if);

free_skb:
	if (ret == NET_RX_SUCCESS)
		consume_skb(skb);
	return NET_RX_SUCCESS;
	else
		kfree_skb(skb);

	return ret;
}
+66 −5
Original line number Diff line number Diff line
@@ -140,6 +140,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
	unsigned char *ogm_buff, *pkt_buff;
	int ogm_buff_len;
	u16 tvlv_len = 0;
	int ret;

	bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
	bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
@@ -182,6 +183,31 @@ static void batadv_v_ogm_send(struct work_struct *work)
		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		ret = batadv_hardif_no_broadcast(hard_iface, NULL, NULL);
		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselve on %s surpressed: %s\n",
				   hard_iface->net_dev->name, type);

			batadv_hardif_put(hard_iface);
			continue;
		}

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
			   ogm_packet->orig, ntohl(ogm_packet->seqno),
@@ -651,6 +677,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
	struct batadv_hard_iface *hard_iface;
	struct batadv_ogm2_packet *ogm_packet;
	u32 ogm_throughput, link_throughput, path_throughput;
	int ret;

	ethhdr = eth_hdr(skb);
	ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset);
@@ -716,6 +743,35 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		ret = batadv_hardif_no_broadcast(hard_iface,
						 ogm_packet->orig,
						 hardif_neigh->orig);

		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s surpressed: %s\n",
				   ogm_packet->orig, hard_iface->net_dev->name,
				   type);

			batadv_hardif_put(hard_iface);
			continue;
		}

		batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
					       orig_node, neigh_node,
					       if_incoming, hard_iface);
@@ -754,18 +810,18 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
	 * B.A.T.M.A.N. V enabled ?
	 */
	if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
		return NET_RX_DROP;
		goto free_skb;

	if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
		return NET_RX_DROP;
		goto free_skb;

	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
		return NET_RX_DROP;
		goto free_skb;

	ogm_packet = (struct batadv_ogm2_packet *)skb->data;

	if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
		return NET_RX_DROP;
		goto free_skb;

	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
@@ -786,7 +842,12 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
	}

	ret = NET_RX_SUCCESS;

free_skb:
	if (ret == NET_RX_SUCCESS)
		consume_skb(skb);
	else
		kfree_skb(skb);

	return ret;
}
+40 −27
Original line number Diff line number Diff line
@@ -948,6 +948,41 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
	return vid;
}

/**
 * batadv_dat_arp_create_reply - create an ARP Reply
 * @bat_priv: the bat priv with all the soft interface information
 * @ip_src: ARP sender IP
 * @ip_dst: ARP target IP
 * @hw_src: Ethernet source and ARP sender MAC
 * @hw_dst: Ethernet destination and ARP target MAC
 * @vid: VLAN identifier (optional, set to zero otherwise)
 *
 * Creates an ARP Reply from the given values, optionally encapsulated in a
 * VLAN header.
 *
 * Return: An skb containing an ARP Reply.
 */
static struct sk_buff *
batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
			    __be32 ip_dst, u8 *hw_src, u8 *hw_dst,
			    unsigned short vid)
{
	struct sk_buff *reply;

	reply = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst,
			   bat_priv->soft_iface, ip_src, hw_dst, hw_src,
			   hw_dst);
	if (!reply)
		return NULL;

	/* the rest of the TX path expects the mac_header offset to point at
	 * the inner Ethernet header
	 */
	skb_reset_mac_header(reply);

	/* optionally wrap the reply in a VLAN header; vlan_insert_tag()
	 * consumes the skb and yields NULL on failure
	 */
	if (vid & BATADV_VLAN_HAS_TAG)
		reply = vlan_insert_tag(reply, htons(ETH_P_8021Q),
					vid & VLAN_VID_MASK);

	return reply;
}

/**
 * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
 * answer using DAT
@@ -1005,20 +1040,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
			goto out;
		}

		skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
				     bat_priv->soft_iface, ip_dst, hw_src,
				     dat_entry->mac_addr, hw_src);
		skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
						      dat_entry->mac_addr,
						      hw_src, vid);
		if (!skb_new)
			goto out;

		if (vid & BATADV_VLAN_HAS_TAG) {
			skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
						  vid & VLAN_VID_MASK);
			if (!skb_new)
				goto out;
		}

		skb_reset_mac_header(skb_new);
		skb_new->protocol = eth_type_trans(skb_new,
						   bat_priv->soft_iface);
		bat_priv->stats.rx_packets++;
@@ -1081,24 +1108,10 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
	if (!dat_entry)
		goto out;

	skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
			     bat_priv->soft_iface, ip_dst, hw_src,
			     dat_entry->mac_addr, hw_src);

	if (!skb_new)
		goto out;

	/* the rest of the TX path assumes that the mac_header offset pointing
	 * to the inner Ethernet header has been set, therefore reset it now.
	 */
	skb_reset_mac_header(skb_new);

	if (vid & BATADV_VLAN_HAS_TAG) {
		skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
					  vid & VLAN_VID_MASK);
	skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
					      dat_entry->mac_addr, hw_src, vid);
	if (!skb_new)
		goto out;
	}

	/* To preserve backwards compatibility, the node has to choose the outgoing
	 * format based on the incoming request packet type. The assumption is
+43 −27
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
@@ -42,17 +43,23 @@
/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 * @dropped: whether the chain is cleared because all fragments are dropped
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);

		if (dropped)
			kfree_skb(entry->skb);
		else
			consume_skb(entry->skb);

		kfree(entry);
	}
}
@@ -73,7 +80,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->fragment_list);
			batadv_frag_clear_chain(&chain->fragment_list, true);
			chain->size = 0;
		}

@@ -118,7 +125,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
		return false;

	if (!hlist_empty(&chain->fragment_list))
		batadv_frag_clear_chain(&chain->fragment_list);
		batadv_frag_clear_chain(&chain->fragment_list, true);

	chain->size = 0;
	chain->seqno = seqno;
@@ -220,7 +227,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list);
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
@@ -254,6 +261,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out;
	int size, hdr_size = sizeof(struct batadv_frag_packet);
	bool dropped = false;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
@@ -270,6 +278,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		dropped = true;
		goto free;
	}

@@ -291,7 +300,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain);
	batadv_frag_clear_chain(chain, dropped);
	return skb_out;
}

@@ -433,8 +442,7 @@ static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or -1 in case of error.
 * When -1 is returned the skb is not consumed.
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
@@ -447,7 +455,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	int ret = -1;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -457,13 +465,17 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

	/* Don't even try to fragment, if we need more than 16 fragments */
	if (skb->len > max_packet_size)
		goto out;
	if (skb->len > max_packet_size) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	if (!primary_if) {
		ret = -EINVAL;
		goto put_primary_if;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
@@ -488,34 +500,35 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
			goto out;
		if (!skb_fragment) {
			ret = -ENOMEM;
			goto free_skb;
		}

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			/* return -1 so that the caller can free the original
			 * skb
			 */
			ret = -1;
			goto out;
			ret = NET_XMIT_DROP;
			goto free_skb;
		}

		frag_header.no++;

		/* The initial check in this function should cover this case */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
			ret = -1;
			goto out;
			ret = -EINVAL;
			goto free_skb;
		}
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
		goto out;
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
		ret = -ENOMEM;
		goto free_skb;
	}

	memcpy(skb->data, &frag_header, header_size);

@@ -524,10 +537,13 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed */
	skb = NULL;

out:
	if (primary_if)
put_primary_if:
	batadv_hardif_put(primary_if);
free_skb:
	kfree_skb(skb);

	return ret;
}
Loading