Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d9fbfb94 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'tipc-namespaces'

Ying Xue says:

====================
tipc: make tipc support namespace

This patchset aims to add net namespace support for TIPC stack.

Currently TIPC module declares the following global resources:
- TIPC network identification number
- TIPC node table
- TIPC bearer list table
- TIPC broadcast link
- TIPC socket reference table
- TIPC name service table
- TIPC node address
- TIPC service subscriber server
- TIPC random value
- TIPC netlink

In order to make TIPC namespace-aware, each of the above resources must be
allocated, initialized and destroyed per namespace. Therefore,
the major works of this patchset are to isolate these global resources
and make them private for each namespace. However, before these changes
come true, some necessary preparation works must be first done: convert
socket reference table with generic rhashtable, cleanup core.c and
core.h files, remove unnecessary wrapper functions for kernel timer
interfaces and so on.

It should be noted that commit #1 ("tipc: fix bug in broadcast
retransmit code") was already submitted to 'net' tree, so please see
below link:

http://patchwork.ozlabs.org/patch/426717/



Since it is a prerequisite for the rest of the series to apply, I
prepend it to the series.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 45e81834 d49e2041
Loading
Loading
Loading
Loading
+44 −1
Original line number Diff line number Diff line
@@ -34,8 +34,51 @@
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include <linux/kernel.h>
#include "addr.h"
#include "core.h"

/**
 * in_own_cluster - check whether addr lies in own cluster; <0.0.0> always
 * matches
 */
int in_own_cluster(struct net *net, u32 addr)
{
	if (!addr)
		return 1;
	return in_own_cluster_exact(net, addr);
}

int in_own_cluster_exact(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Shifting out the 12-bit node field leaves the zone and cluster
	 * bits; both addresses are in the same cluster iff those agree.
	 */
	return ((addr ^ tn->own_addr) >> 12) == 0;
}

/**
 * in_own_node - check whether addr names this node; <0.0.0> always matches
 */
int in_own_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (!addr)
		return 1;
	return addr == tn->own_addr;
}

/**
 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
 *
 * Needed when address of a named message must be looked up a second time
 * after a network hop.
 */
u32 addr_domain(struct net *net, u32 sc)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 self = tn->own_addr;

	switch (sc) {
	case TIPC_NODE_SCOPE:
		return self;
	case TIPC_CLUSTER_SCOPE:
		return tipc_cluster_mask(self);
	default:
		return tipc_zone_mask(self);
	}
}

/**
 * tipc_addr_domain_valid - validates a network domain address
+8 −37
Original line number Diff line number Diff line
@@ -37,7 +37,10 @@
#ifndef _TIPC_ADDR_H
#define _TIPC_ADDR_H

#include "core.h"
#include <linux/types.h>
#include <linux/tipc.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define TIPC_ZONE_MASK		0xff000000u
#define TIPC_CLUSTER_MASK	0xfffff000u
@@ -52,42 +55,10 @@ static inline u32 tipc_cluster_mask(u32 addr)
	return addr & TIPC_CLUSTER_MASK;
}

/**
 * in_own_cluster_exact - test whether addr is in own zone and cluster
 *
 * True when addr and tipc_own_addr differ only in the low 12 bits, i.e.
 * the bits outside TIPC_CLUSTER_MASK (0xfffff000).
 */
static inline int in_own_cluster_exact(u32 addr)
{
	return !((addr ^ tipc_own_addr) >> 12);
}

/**
 * in_own_node - test for node inclusion; <0.0.0> always matches
 *
 * Returns non-zero if addr equals this node's own address (tipc_own_addr)
 * or is the wildcard address <0.0.0>.
 */
static inline int in_own_node(u32 addr)
{
	return (addr == tipc_own_addr) || !addr;
}

/**
 * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
 *
 * Accepts either an exact zone/cluster match or the wildcard address.
 */
static inline int in_own_cluster(u32 addr)
{
	return in_own_cluster_exact(addr) || !addr;
}

/**
 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
 *
 * Needed when address of a named message must be looked up a second time
 * after a network hop.
 *
 * Node scope maps to the full own address; cluster scope masks off the
 * node field; any other scope falls through to the zone mask.
 */
static inline u32 addr_domain(u32 sc)
{
	if (likely(sc == TIPC_NODE_SCOPE))
		return tipc_own_addr;
	if (sc == TIPC_CLUSTER_SCOPE)
		return tipc_cluster_mask(tipc_own_addr);
	return tipc_zone_mask(tipc_own_addr);
}

int in_own_cluster(struct net *net, u32 addr);
int in_own_cluster_exact(struct net *net, u32 addr);
int in_own_node(struct net *net, u32 addr);
u32 addr_domain(struct net *net, u32 sc);
int tipc_addr_domain_valid(u32);
int tipc_addr_node_valid(u32 addr);
int tipc_in_scope(u32 domain, u32 addr);
+184 −188
Original line number Diff line number Diff line
@@ -35,77 +35,14 @@
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define	BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
#define	BCBEARER		MAX_BEARERS

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bclink_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bclink - link used for broadcast messages
 * @lock: spinlock governing access to structure
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @flags: represent bclink states
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bclink {
	spinlock_t lock;
	struct tipc_link link;
	struct tipc_node node;
	unsigned int flags;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

static struct tipc_bcbearer *bcbearer;
static struct tipc_bclink *bclink;
static struct tipc_link *bcl;

const char tipc_bclink_name[] = "broadcast-link";

@@ -115,25 +52,28 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(void)
static void tipc_bclink_lock(struct net *net)
{
	spin_lock_bh(&bclink->lock);
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

static void tipc_bclink_unlock(void)
static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node = NULL;

	if (likely(!bclink->flags)) {
		spin_unlock_bh(&bclink->lock);
	if (likely(!tn->bclink->flags)) {
		spin_unlock_bh(&tn->bclink->lock);
		return;
	}

	if (bclink->flags & TIPC_BCLINK_RESET) {
		bclink->flags &= ~TIPC_BCLINK_RESET;
		node = tipc_bclink_retransmit_to();
	if (tn->bclink->flags & TIPC_BCLINK_RESET) {
		tn->bclink->flags &= ~TIPC_BCLINK_RESET;
		node = tipc_bclink_retransmit_to(net);
	}
	spin_unlock_bh(&bclink->lock);
	spin_unlock_bh(&tn->bclink->lock);

	if (node)
		tipc_link_reset_all(node);
@@ -144,9 +84,11 @@ uint tipc_bclink_get_mtu(void)
	return MAX_PKT_DEFAULT_MCAST;
}

void tipc_bclink_set_flags(unsigned int flags)
void tipc_bclink_set_flags(struct net *net, unsigned int flags)
{
	bclink->flags |= flags;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tn->bclink->flags |= flags;
}

static u32 bcbuf_acks(struct sk_buff *buf)
@@ -164,31 +106,40 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(u32 addr)
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	tipc_bclink_lock();
	tipc_nmap_add(&bclink->bcast_nodes, addr);
	tipc_bclink_unlock();
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(u32 addr)
void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	tipc_bclink_lock();
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
	tipc_bclink_unlock();
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

static void bclink_set_last_sent(void)
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
u32 tipc_bclink_get_last_sent(struct net *net)
{
	return bcl->fsm_msg_cnt;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->fsm_msg_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -203,9 +154,11 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(void)
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	return bclink->retransmit_to;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}

/**
@@ -215,15 +168,17 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(u32 after, u32 to)
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->outqueue, skb) {
		if (more(buf_seqno(skb), after))
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	tipc_link_retransmit(bcl, skb, mod(to - after));
	}
}

/**
@@ -231,13 +186,13 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(void)
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
		tipc_sk_rcv(skb);

	while ((skb = skb_dequeue(&tn->bclink->link.waiting_sks)))
		tipc_sk_rcv(net, skb);
}

/**
@@ -252,10 +207,12 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
	struct sk_buff *skb, *tmp;
	struct sk_buff *next;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock();
	tipc_bclink_lock(net);
	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&bcl->outqueue);
	skb = skb_peek(&tn->bcl->outqueue);
	if (!skb)
		goto exit;

@@ -266,43 +223,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (bclink->bcast_nodes.count)
			acked = bcl->fsm_msg_cnt;
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->fsm_msg_cnt;
		else
			acked = bcl->next_out_no;
			acked = tn->bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(bcl->fsm_msg_cnt, acked) ||
		    less(tn->bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&bcl->outqueue, skb) {
	skb_queue_walk(&tn->bcl->outqueue, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
	skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;

		next = tipc_skb_queue_next(&bcl->outqueue, skb);
		if (skb != bcl->next_out) {
		next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
		if (skb != tn->bcl->next_out) {
			bcbuf_decr_acks(skb);
		} else {
			bcbuf_set_acks(skb, 0);
			bcl->next_out = next;
			bclink_set_last_sent();
			tn->bcl->next_out = next;
			bclink_set_last_sent(net);
		}

		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &bcl->outqueue);
			__skb_unlink(skb, &tn->bcl->outqueue);
			kfree_skb(skb);
			released = 1;
		}
@@ -310,15 +267,15 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(bcl->next_out)) {
		tipc_link_push_packets(bcl);
		bclink_set_last_sent();
	if (unlikely(tn->bcl->next_out)) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
	if (unlikely(released && !skb_queue_empty(&tn->bcl->waiting_sks)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;

exit:
	tipc_bclink_unlock();
	tipc_bclink_unlock(net);
}

/**
@@ -326,9 +283,11 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
void tipc_bclink_update_link_state(struct net *net, struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
@@ -358,18 +317,18 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
		tipc_msg_init(net, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock();
		tipc_bearer_send(MAX_BEARERS, buf, NULL);
		bcl->stats.sent_nacks++;
		tipc_bclink_unlock();
		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
@@ -382,9 +341,9 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct tipc_msg *msg)
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;
@@ -401,12 +360,16 @@ static void bclink_peek_nack(struct tipc_msg *msg)

/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct sk_buff_head *list)
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
@@ -420,19 +383,19 @@ int tipc_bclink_xmit(struct sk_buff_head *list)

	/* Broadcast to all other nodes */
	if (likely(bclink)) {
		tipc_bclink_lock();
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(bcl, list);
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->outqueue);

				bclink_set_last_sent();
				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock();
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
@@ -440,7 +403,7 @@ int tipc_bclink_xmit(struct sk_buff_head *list)

	/* Deliver message clone */
	if (likely(!rc))
		tipc_sk_mcast_rcv(skb);
		tipc_sk_mcast_rcv(net, skb);
	else
		kfree_skb(skb);

@@ -454,19 +417,21 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	bcl->stats.recv_info++;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0, 0);
		bcl->stats.sent_acks++;
		tn->bcl->stats.sent_acks++;
	}
}

@@ -475,8 +440,10 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct sk_buff *buf)
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
@@ -484,10 +451,10 @@ void tipc_bclink_rcv(struct sk_buff *buf)
	int deferred = 0;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tipc_net_id)
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

@@ -499,18 +466,18 @@ void tipc_bclink_rcv(struct sk_buff *buf)
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			tipc_bclink_lock();
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock();
			tipc_bclink_unlock(net);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(msg);
			bclink_peek_nack(net, msg);
		}
		goto exit;
	}
@@ -523,47 +490,47 @@ void tipc_bclink_rcv(struct sk_buff *buf)
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock();
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock();
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_sk_mcast_rcv(buf);
				tipc_sk_mcast_rcv(net, buf);
			else
				kfree_skb(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock();
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_bclink_unlock();
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			tipc_link_bundle_rcv(buf);
			tipc_link_bundle_rcv(net, buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf))
				goto unlock;
			tipc_bclink_lock();
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock();
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock();
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			tipc_bclink_lock();
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock();
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			tipc_named_rcv(buf);
			tipc_named_rcv(net, buf);
		} else {
			tipc_bclink_lock();
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock();
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
@@ -601,14 +568,14 @@ void tipc_bclink_rcv(struct sk_buff *buf)
		buf = NULL;
	}

	tipc_bclink_lock();
	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock();
	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
@@ -619,7 +586,7 @@ void tipc_bclink_rcv(struct sk_buff *buf)
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}


@@ -632,11 +599,15 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
@@ -646,8 +617,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
@@ -676,13 +647,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b->identity, buf, &b->bcast_addr);
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
@@ -697,15 +669,18 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock();
	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
@@ -717,7 +692,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(bearer_list[b_index]);
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

@@ -752,7 +727,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
		bp_curr++;
	}

	tipc_bclink_unlock();
	tipc_bclink_unlock(net);
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -806,17 +781,19 @@ static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock();
	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -851,7 +828,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock();
	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

@@ -862,21 +839,23 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock();
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_bclink_stats(char *buf, const u32 buf_size)
int tipc_bclink_stats(struct net *net, char *buf, const u32 buf_size)
{
	int ret;
	struct tipc_stats *s;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock();
	tipc_bclink_lock(net);

	s = &bcl->stats;

@@ -905,36 +884,47 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
			     s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_bclink_unlock();
	tipc_bclink_unlock(net);
	return ret;
}

int tipc_bclink_reset_stats(void)
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock();
	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock();
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock();
	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock();
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_bclink_init(void)
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;
@@ -958,25 +948,31 @@ int tipc_bclink_init(void)
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->node.waiting_sks);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}

void tipc_bclink_stop(void)
void tipc_bclink_stop(struct net *net)
{
	tipc_bclink_lock();
	tipc_link_purge_queues(bcl);
	tipc_bclink_unlock();
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(bcbearer);
	kfree(bclink);
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}

/**
Loading