Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 656edac6 authored by David S. Miller
Browse files

Merge branch 'sctp'



Wang Weidong says:

====================
sctp: remove some macro locking wrappers

In sctp.h we can find some macro locking wrappers. As Neil pointed out:

"It's because in the original implementation of the sctp protocol, there was a
user space test harness which built the kernel module for userspace execution to
carry out some unit testing on the code.  It did so by redefining some of those
locking macros to user space friendly code.  IIRC we haven't used those unit
tests in years, and so should be removing them, not adding them to other
locations."

So I remove them.
====================

Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d08f161a 5bc1d1b4
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -713,11 +713,11 @@ static void process_sctp_notification(struct connection *con,
				return;

			/* Peel off a new sock */
			sctp_lock_sock(con->sock->sk);
			lock_sock(con->sock->sk);
			ret = sctp_do_peeloff(con->sock->sk,
				sn->sn_assoc_change.sac_assoc_id,
				&new_con->sock);
			sctp_release_sock(con->sock->sk);
			release_sock(con->sock->sk);
			if (ret < 0) {
				log_print("Can't peel off a socket for "
					  "connection %d to node %d: err=%d",
+4 −23
Original line number Diff line number Diff line
@@ -170,25 +170,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
 *  Section:  Macros, externs, and inlines
 */

/* spin lock wrappers. */
#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
#define sctp_spin_unlock_irqrestore(lock, flags)  \
       spin_unlock_irqrestore(lock, flags)
#define sctp_local_bh_disable() local_bh_disable()
#define sctp_local_bh_enable()  local_bh_enable()
#define sctp_spin_lock(lock)    spin_lock(lock)
#define sctp_spin_unlock(lock)  spin_unlock(lock)
#define sctp_write_lock(lock)   write_lock(lock)
#define sctp_write_unlock(lock) write_unlock(lock)
#define sctp_read_lock(lock)    read_lock(lock)
#define sctp_read_unlock(lock)  read_unlock(lock)

/* sock lock wrappers. */
#define sctp_lock_sock(sk)       lock_sock(sk)
#define sctp_release_sock(sk)    release_sock(sk)
#define sctp_bh_lock_sock(sk)    bh_lock_sock(sk)
#define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)

/* SCTP SNMP MIB stats handlers */
#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
@@ -353,13 +334,13 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list,
{
	unsigned long flags;

	sctp_spin_lock_irqsave(&head->lock, flags);
	sctp_spin_lock(&list->lock);
	spin_lock_irqsave(&head->lock, flags);
	spin_lock(&list->lock);

	skb_queue_splice_tail_init(list, head);

	sctp_spin_unlock(&list->lock);
	sctp_spin_unlock_irqrestore(&head->lock, flags);
	spin_unlock(&list->lock);
	spin_unlock_irqrestore(&head->lock, flags);
}

/**
+2 −2
Original line number Diff line number Diff line
@@ -368,9 +368,9 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
{
	struct sctp_association *asoc;

	sctp_local_bh_disable();
	local_bh_disable();
	asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport);
	sctp_local_bh_enable();
	local_bh_enable();

	return asoc;
}
+27 −27
Original line number Diff line number Diff line
@@ -238,7 +238,7 @@ int sctp_rcv(struct sk_buff *skb)
	 * bottom halves on this lock, but a user may be in the lock too,
	 * so check if it is busy.
	 */
	sctp_bh_lock_sock(sk);
	bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* Our cached sk is different from the rcvr->sk.  This is
@@ -248,14 +248,14 @@ int sctp_rcv(struct sk_buff *skb)
		 * be doing something with the new socket.  Switch our veiw
		 * of the current sk.
		 */
		sctp_bh_unlock_sock(sk);
		bh_unlock_sock(sk);
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
		bh_lock_sock(sk);
	}

	if (sock_owned_by_user(sk)) {
		if (sctp_add_backlog(sk, skb)) {
			sctp_bh_unlock_sock(sk);
			bh_unlock_sock(sk);
			sctp_chunk_free(chunk);
			skb = NULL; /* sctp_chunk_free already freed the skb */
			goto discard_release;
@@ -266,7 +266,7 @@ int sctp_rcv(struct sk_buff *skb)
		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
	}

	sctp_bh_unlock_sock(sk);
	bh_unlock_sock(sk);

	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
@@ -327,7 +327,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
		 */

		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
		bh_lock_sock(sk);

		if (sock_owned_by_user(sk)) {
			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
@@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
		} else
			sctp_inq_push(inqueue, chunk);

		sctp_bh_unlock_sock(sk);
		bh_unlock_sock(sk);

		/* If the chunk was backloged again, don't drop refs */
		if (backloged)
@@ -522,7 +522,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
		goto out;
	}

	sctp_bh_lock_sock(sk);
	bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
@@ -542,7 +542,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
	sctp_bh_unlock_sock(sk);
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

@@ -718,17 +718,17 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
	write_unlock(&head->lock);
}

/* Add an endpoint to the hash. Local BH-safe. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	local_bh_disable();
	__sctp_hash_endpoint(ep);
	sctp_local_bh_enable();
	local_bh_enable();
}

/* Remove endpoint from the hash table.  */
@@ -744,17 +744,17 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)

	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	write_lock(&head->lock);
	hlist_del_init(&epb->node);
	sctp_write_unlock(&head->lock);
	write_unlock(&head->lock);
}

/* Remove endpoint from the hash.  Local BH-safe. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	local_bh_disable();
	__sctp_unhash_endpoint(ep);
	sctp_local_bh_enable();
	local_bh_enable();
}

/* Look up an endpoint. */
@@ -798,9 +798,9 @@ static void __sctp_hash_established(struct sctp_association *asoc)

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
	write_unlock(&head->lock);
}

/* Add an association to the hash. Local BH-safe. */
@@ -809,9 +809,9 @@ void sctp_hash_established(struct sctp_association *asoc)
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	local_bh_disable();
	__sctp_hash_established(asoc);
	sctp_local_bh_enable();
	local_bh_enable();
}

/* Remove association from the hash table.  */
@@ -828,9 +828,9 @@ static void __sctp_unhash_established(struct sctp_association *asoc)

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	write_lock(&head->lock);
	hlist_del_init(&epb->node);
	sctp_write_unlock(&head->lock);
	write_unlock(&head->lock);
}

/* Remove association from the hash table.  Local BH-safe. */
@@ -839,9 +839,9 @@ void sctp_unhash_established(struct sctp_association *asoc)
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	local_bh_disable();
	__sctp_unhash_established(asoc);
	sctp_local_bh_enable();
	local_bh_enable();
}

/* Look up an association. */
@@ -891,9 +891,9 @@ struct sctp_association *sctp_lookup_association(struct net *net,
{
	struct sctp_association *asoc;

	sctp_local_bh_disable();
	local_bh_disable();
	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
	sctp_local_bh_enable();
	local_bh_enable();

	return asoc;
}
+6 −6
Original line number Diff line number Diff line
@@ -218,7 +218,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
		return -ENOMEM;

	head = &sctp_ep_hashtable[hash];
	sctp_local_bh_disable();
	local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, &head->chain) {
		ep = sctp_ep(epb);
@@ -235,7 +235,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
		seq_printf(seq, "\n");
	}
	read_unlock(&head->lock);
	sctp_local_bh_enable();
	local_bh_enable();

	return 0;
}
@@ -326,7 +326,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
		return -ENOMEM;

	head = &sctp_assoc_hashtable[hash];
	sctp_local_bh_disable();
	local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, &head->chain) {
		assoc = sctp_assoc(epb);
@@ -362,7 +362,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
		seq_printf(seq, "\n");
	}
	read_unlock(&head->lock);
	sctp_local_bh_enable();
	local_bh_enable();

	return 0;
}
@@ -446,7 +446,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
		return -ENOMEM;

	head = &sctp_assoc_hashtable[hash];
	sctp_local_bh_disable();
	local_bh_disable();
	read_lock(&head->lock);
	rcu_read_lock();
	sctp_for_each_hentry(epb, &head->chain) {
@@ -505,7 +505,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)

	rcu_read_unlock();
	read_unlock(&head->lock);
	sctp_local_bh_enable();
	local_bh_enable();

	return 0;

Loading