Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d6418829 authored by Hoang Le's avatar Hoang Le Committed by Greg Kroah-Hartman
Browse files

tipc: improve throughput between nodes in netns



[ Upstream commit f73b12812a3d1d798b7517547ccdcf864844d2cd ]

Currently, TIPC transports intra-node user data messages directly
socket to socket, hence shortcutting all the lower layers of the
communication stack. This gives TIPC very good intra node performance,
both regarding throughput and latency.

We now introduce a similar mechanism for TIPC data traffic across
network namespaces located in the same kernel. On the send path, the
call chain is as always accompanied by the sending node's network name
space pointer. However, once we have reliably established that the
receiving node is represented by a namespace on the same host, we just
replace the namespace pointer with the receiving node/namespace's
ditto, and follow the regular socket receive path through the receiving
node. This technique gives us a throughput similar to the node internal
throughput, several times larger than if we let the traffic go through
the full network stacks. As a comparison, max throughput for 64k
messages is four times larger than TCP throughput for the same type of
traffic.

To meet any security concerns, the following should be noted.

- All nodes joining a cluster are supposed to have been certified
and authenticated by mechanisms outside TIPC. This is no different for
nodes/namespaces on the same host; they have to auto discover each
other using the attached interfaces, and establish links which are
supervised via the regular link monitoring mechanism. Hence, a kernel
local node has no other way to join a cluster than any other node, and
has to obey the policies set in the IP or device layers of the stack.

- Only when a sender has established with 100% certainty that the peer
node is located in a kernel local namespace does it choose to let user
data messages, and only those, take the crossover path to the receiving
node/namespace.

- If the receiving node/namespace is removed, its namespace pointer
is invalidated at all peer nodes, and their neighbor link monitoring
will eventually note that this node is gone.

- To ensure the "100% certainty" criteria, and prevent any possible
spoofing, received discovery messages must contain a proof that the
sender knows a common secret. We use the hash mix of the sending
node/namespace for this purpose, since it can be accessed directly by
all other namespaces in the kernel. Upon reception of a discovery
message, the receiver checks this proof against all the local
namespaces' hash_mix values. If it finds a match, then that, along with
a matching node id and cluster id, is deemed sufficient proof that
the peer node in question is in a local namespace, and a wormhole can
be opened.

- We should also consider that TIPC is intended to be a cluster local
IPC mechanism (just like e.g. UNIX sockets) rather than a network
protocol, and hence we think it can be justified to allow it to shortcut the
lower protocol layers.

Regarding traceability, we should notice that since commit 6c9081a3
("tipc: add loopback device tracking") it is possible to follow the node
internal packet flow by just activating tcpdump on the loopback
interface. This will be true even for this mechanism; by activating
tcpdump on the involved nodes' loopback interfaces their inter-namespace
messaging can easily be tracked.

v2:
- update 'net' pointer when node left/rejoined
v3:
- grab read/write lock when using node ref obj
v4:
- clone traffic between netns to loopback

Suggested-by: default avatarJon Maloy <jon.maloy@ericsson.com>
Acked-by: default avatarJon Maloy <jon.maloy@ericsson.com>
Signed-off-by: default avatarHoang Le <hoang.h.le@dektech.com.au>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
Stable-dep-of: c244c092f1ed ("tipc: fix unexpected link reset due to discovery messages")
Signed-off-by: default avatarSasha Levin <sashal@kernel.org>
parent d443308e
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -112,6 +112,15 @@ static void __net_exit tipc_exit_net(struct net *net)
		cond_resched();
}

/* Netns pre-exit hook: lets TIPC clean up per-node state referencing
 * this namespace before the namespace itself is torn down (per the
 * commit message, peer nodes' pointers to a removed namespace must be
 * invalidated).
 */
static void __net_exit tipc_pernet_pre_exit(struct net *net)
{
	tipc_node_pre_cleanup_net(net);
}

/* Pernet operations exposing only the .pre_exit stage of netns
 * teardown; registered/unregistered in tipc_init()/tipc_exit().
 */
static struct pernet_operations tipc_pernet_pre_exit_ops = {
	.pre_exit = tipc_pernet_pre_exit,
};

static struct pernet_operations tipc_net_ops = {
	.init = tipc_init_net,
	.exit = tipc_exit_net,
@@ -150,6 +159,10 @@ static int __init tipc_init(void)
	if (err)
		goto out_pernet_topsrv;

	err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
	if (err)
		goto out_register_pernet_subsys;

	err = tipc_bearer_setup();
	if (err)
		goto out_bearer;
@@ -170,6 +183,8 @@ static int __init tipc_init(void)
out_netlink:
	tipc_bearer_cleanup();
out_bearer:
	unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
out_register_pernet_subsys:
	unregister_pernet_device(&tipc_topsrv_net_ops);
out_pernet_topsrv:
	tipc_socket_stop();
@@ -187,6 +202,7 @@ static void __exit tipc_exit(void)
	tipc_netlink_compat_stop();
	tipc_netlink_stop();
	tipc_bearer_cleanup();
	unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
	unregister_pernet_device(&tipc_topsrv_net_ops);
	tipc_socket_stop();
	unregister_pernet_device(&tipc_net_ops);
+6 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
#include <net/genetlink.h>
#include <net/netns/hash.h>

#ifdef pr_fmt
#undef pr_fmt
@@ -202,6 +203,11 @@ static inline int in_range(u16 val, u16 min, u16 max)
	return !less(val, min) && !more(val, max);
}

/* Combine the hash mix of init_net and the given namespace with a
 * per-node random value. Used as the shared-kernel "proof" carried in
 * discovery messages (word 13): only namespaces living in the same
 * kernel can reproduce this value, so a match identifies a kernel-local
 * peer (see commit message on spoofing prevention).
 */
static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
{
	return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
}

#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
+3 −1
Original line number Diff line number Diff line
@@ -94,6 +94,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
	msg_set_dest_domain(hdr, dest_domain);
	msg_set_bc_netid(hdr, tn->net_id);
	b->media->addr2msg(msg_media_addr(hdr), &b->addr);
	msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
	msg_set_node_id(hdr, tipc_own_id(net));
}

@@ -245,7 +246,8 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
	if (!tipc_in_scope(legacy, b->domain, src))
		return;
	tipc_node_check_dest(net, src, peer_id, b, caps, signature,
			     &maddr, &respond, &dupl_addr);
			     msg_peer_net_hash(hdr), &maddr, &respond,
			     &dupl_addr);
	if (dupl_addr)
		disc_dupl_alert(b, src, &maddr);
	if (!respond)
+14 −0
Original line number Diff line number Diff line
@@ -1026,6 +1026,20 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
	return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}

/* Word 13: peer namespace hash — written into discovery messages as
 * proof that the sender shares this kernel (value produced by
 * tipc_net_hash_mixes()).
 */
static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
{
	msg_set_word(m, 13, n);
}

/* Read back the peer namespace hash stored in word 13 of the header. */
static inline u32 msg_peer_net_hash(struct tipc_msg *m)
{
	return msg_word(m, 13);
}

/* Word 14
 */
static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
{
	return msg_word(m, 14);
+1 −1
Original line number Diff line number Diff line
@@ -146,7 +146,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
			ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;

Loading