Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a386bff8 authored by David S. Miller
Browse files


Paul Gortmaker says:

====================
The most interesting thing here, at least from a user perspective,
is the broadcast link fix -- where there was a corner case where
two endpoints could get in a state where they disagree on where
to start Rx and ack of broadcast packets.

There is also the poll/wait changes which could also impact
end users for certain use cases - the fixes there also better
align tipc with the rest of the networking code.

The rest largely falls into routine cleanup category, by getting
rid of some unused routines, some Kconfig clutter, etc.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2b916477 94fc9c47
Loading
Loading
Loading
Loading
+1 −12
Original line number Diff line number Diff line
@@ -20,18 +20,9 @@ menuconfig TIPC

	  If in doubt, say N.

if TIPC

config TIPC_ADVANCED
	bool "Advanced TIPC configuration"
	default n
	help
	  Saying Y here will open some advanced configuration for TIPC.
	  Most users do not need to bother; if unsure, just say N.

config TIPC_PORTS
	int "Maximum number of ports in a node"
	depends on TIPC_ADVANCED
	depends on TIPC
	range 127 65535
	default "8191"
	help
@@ -40,5 +31,3 @@ config TIPC_PORTS

	  Setting this to a smaller value saves some memory,
	  setting it to higher allows for more ports.

endif # TIPC
+12 −15
Original line number Diff line number Diff line
@@ -347,7 +347,7 @@ static void bclink_peek_nack(struct tipc_msg *msg)

	tipc_node_lock(n_ptr);

	if (n_ptr->bclink.supported &&
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
@@ -429,7 +429,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.supported))
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
@@ -564,7 +564,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.supported &&
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}

@@ -619,16 +619,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue;	/* bearer pair doesn't add anything */

		if (p->blocked ||
		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
		if (!tipc_bearer_blocked(p))
			tipc_bearer_send(p, buf, &p->media->bcast_addr);
		else if (s && !tipc_bearer_blocked(s))
			/* unable to send on primary bearer */
			if (!s || s->blocked ||
			    s->media->send_msg(buf, s,
					       &s->media->bcast_addr)) {
			tipc_bearer_send(s, buf, &s->media->bcast_addr);
		else
			/* unable to send on either bearer */
			continue;
			}
		}

		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
@@ -731,8 +729,8 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
			     "  TX naks:%u acks:%u dups:%u\n",
			     s->sent_nacks, s->sent_acks, s->retransmitted);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
			     s->bearer_congs, s->link_congs, s->max_queue_sz,
			     "  Congestion link:%u  Send queue max:%u avg:%u\n",
			     s->link_congs, s->max_queue_sz,
			     s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

@@ -766,7 +764,6 @@ int tipc_bclink_set_queue_limits(u32 limit)

void tipc_bclink_init(void)
{
	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");
+11 −99
Original line number Diff line number Diff line
@@ -279,115 +279,30 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
}

/*
 * bearer_push(): Resolve bearer congestion. Force the waiting
 * links to push out their unsent packets, one packet per link
 * per iteration, until all packets are gone or congestion reoccurs.
 * 'tipc_net_lock' is read_locked when this function is called;
 * bearer.lock must be taken before calling.
 * Returns binary true (1) or false (0): non-zero when the bearer's
 * congested-link list has been fully drained, zero otherwise.
 */
static int bearer_push(struct tipc_bearer *b_ptr)
{
	u32 res = 0;
	struct tipc_link *ln, *tln;

	/* A blocked bearer cannot send; report congestion unresolved */
	if (b_ptr->blocked)
		return 0;

	/* Sweep the congested links repeatedly until either every link's
	 * backlog is gone or a send fails (PUSH_FAILED => congestion has
	 * reoccurred, so stop and leave the remainder queued).
	 */
	while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
		list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
			res = tipc_link_push_packet(ln);
			if (res == PUSH_FAILED)
				break;
			if (res == PUSH_FINISHED)
				/* Link drained; move it back to the bearer's
				 * non-congested list.
				 */
				list_move_tail(&ln->link_list, &b_ptr->links);
		}
	}
	return list_empty(&b_ptr->cong_links);
}

/*
 * tipc_bearer_lock_push - resolve bearer congestion under bearer.lock
 *
 * Locking wrapper around bearer_push(): takes the bearer's spinlock
 * (BH-disabled) for the duration of the push. The push result is
 * deliberately ignored here; callers only want a best-effort drain.
 */
void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
{
	spin_lock_bh(&b_ptr->lock);
	bearer_push(b_ptr);
	spin_unlock_bh(&b_ptr->lock);
}


/*
 * Interrupt enabling new requests after bearer congestion or blocking:
 * Interrupt enabling new requests after bearer blocking:
 * See bearer_send().
 */
void tipc_continue(struct tipc_bearer *b_ptr)
void tipc_continue(struct tipc_bearer *b)
{
	spin_lock_bh(&b_ptr->lock);
	if (!list_empty(&b_ptr->cong_links))
		tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
	b_ptr->blocked = 0;
	spin_unlock_bh(&b_ptr->lock);
	spin_lock_bh(&b->lock);
	b->blocked = 0;
	spin_unlock_bh(&b->lock);
}

/*
 * Schedule link for sending of messages after the bearer
 * has been deblocked by 'continue()'. This method is called
 * when somebody tries to send a message via this link while
 * the bearer is congested. 'tipc_net_lock' is in read_lock here
 * bearer.lock is busy
 * tipc_bearer_blocked - determines if bearer is currently blocked
 */
static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
						struct tipc_link *l_ptr)
int tipc_bearer_blocked(struct tipc_bearer *b)
{
	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
}

/*
 * tipc_bearer_schedule - queue a link for service once the bearer is deblocked
 *
 * Schedule link for sending of messages after the bearer
 * has been deblocked by 'continue()'. This method is called
 * when somebody tries to send a message via this link while
 * the bearer is congested. 'tipc_net_lock' is in read_lock here,
 * bearer.lock is free (it is taken and released below).
 */
void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
{
	spin_lock_bh(&b_ptr->lock);
	tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
	spin_unlock_bh(&b_ptr->lock);
}
	int res;

	spin_lock_bh(&b->lock);
	res = b->blocked;
	spin_unlock_bh(&b->lock);

/*
 * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
 * and if there is, try to resolve it before returning.
 * 'tipc_net_lock' is read_locked when this function is called.
 * Returns 1 if the bearer is (now) uncongested; returns 0 if congestion
 * persists, in which case the link has been queued on the bearer's
 * congested-link list for later service.
 */
int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
					struct tipc_link *l_ptr)
{
	int res = 1;

	/* Fast path: nothing queued, so no congestion to resolve */
	if (list_empty(&b_ptr->cong_links))
		return 1;
	spin_lock_bh(&b_ptr->lock);
	if (!bearer_push(b_ptr)) {
		/* Push could not drain the backlog; park this link too */
		tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
		res = 0;
	}
	spin_unlock_bh(&b_ptr->lock);
	return res;
}

/**
 * tipc_bearer_congested - determines if bearer is currently congested
 *
 * Returns 1 if the bearer is blocked or congestion could not be resolved,
 * 0 if the bearer is free to send. May attempt to resolve congestion
 * (via tipc_bearer_resolve_congestion) as a side effect, which can queue
 * @l_ptr on the bearer's congested-link list.
 */
int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
{
	/* A blocked bearer is treated as congested outright */
	if (unlikely(b_ptr->blocked))
		return 1;
	/* Common case: no backlog at all */
	if (likely(list_empty(&b_ptr->cong_links)))
		return 0;
	return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
}

/**
 * tipc_enable_bearer - enable bearer with the given name
 */
@@ -489,7 +404,6 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
	b_ptr->net_plane = bearer_id + 'A';
	b_ptr->active = 1;
	b_ptr->priority = priority;
	INIT_LIST_HEAD(&b_ptr->cong_links);
	INIT_LIST_HEAD(&b_ptr->links);
	spin_lock_init(&b_ptr->lock);

@@ -528,7 +442,6 @@ int tipc_block_bearer(const char *name)
	pr_info("Blocking bearer <%s>\n", name);
	spin_lock_bh(&b_ptr->lock);
	b_ptr->blocked = 1;
	list_splice_init(&b_ptr->cong_links, &b_ptr->links);
	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
		struct tipc_node *n_ptr = l_ptr->owner;

@@ -555,7 +468,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
	spin_lock_bh(&b_ptr->lock);
	b_ptr->blocked = 1;
	b_ptr->media->disable_bearer(b_ptr);
	list_splice_init(&b_ptr->cong_links, &b_ptr->links);
	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
		tipc_link_delete(l_ptr);
	}
+3 −21
Original line number Diff line number Diff line
@@ -120,7 +120,6 @@ struct tipc_media {
 * @identity: array index of this bearer within TIPC bearer array
 * @link_req: ptr to (optional) structure making periodic link setup requests
 * @links: list of non-congested links associated with bearer
 * @cong_links: list of congested links associated with bearer
 * @active: non-zero if bearer structure is represents a bearer
 * @net_plane: network plane ('A' through 'H') currently associated with bearer
 * @nodes: indicates which nodes in cluster can be reached through bearer
@@ -143,7 +142,6 @@ struct tipc_bearer {
	u32 identity;
	struct tipc_link_req *link_req;
	struct list_head links;
	struct list_head cong_links;
	int active;
	char net_plane;
	struct tipc_node_map nodes;
@@ -185,39 +183,23 @@ struct sk_buff *tipc_media_get_names(void);
struct sk_buff *tipc_bearer_get_names(void);
void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
struct tipc_bearer *tipc_bearer_find(const char *name);
struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
struct tipc_media *tipc_media_find(const char *name);
int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
				   struct tipc_link *l_ptr);
int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
int tipc_bearer_blocked(struct tipc_bearer *b_ptr);
void tipc_bearer_stop(void);
void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);


/**
 * tipc_bearer_send- sends buffer to destination over bearer
 *
 * Returns true (1) if successful, or false (0) if unable to send
 *
 * IMPORTANT:
 * The media send routine must not alter the buffer being passed in
 * as it may be needed for later retransmission!
 *
 * If the media send routine returns a non-zero value (indicating that
 * it was unable to send the buffer), it must:
 *   1) mark the bearer as blocked,
 *   2) call tipc_continue() once the bearer is able to send again.
 * Media types that are unable to meet these two critera must ensure their
 * send routine always returns success -- even if the buffer was not sent --
 * and let TIPC's link code deal with the undelivered message.
 */
static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
				   struct sk_buff *buf,
static inline void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
				   struct tipc_media_addr *dest)
{
	return !b_ptr->media->send_msg(buf, b_ptr, dest);
	b->media->send_msg(buf, b, dest);
}

#endif	/* _TIPC_BEARER_H */
+0 −5
Original line number Diff line number Diff line
@@ -42,11 +42,6 @@

#include <linux/module.h>

#ifndef CONFIG_TIPC_PORTS
#define CONFIG_TIPC_PORTS 8191
#endif


/* global variables used by multiple sub-systems within TIPC */
int tipc_random __read_mostly;

Loading