Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87553aa5 authored by David S. Miller
Browse files

Merge branch 'tcp_bh_fixes'



Eric Dumazet says:

====================
net: block BH in TCP callbacks

Four layers using TCP stack were assuming sk_callback_lock could
be locked using read_lock() in their handlers because TCP stack
was running with BH disabled.

This is no longer the case. Since presumably the rest could
also depend on BH being disabled, just use read_lock_bh().

Then each layer might consider switching to RCU protection
and no longer depend on BH.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents e00be9e4 b91083a4
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -131,10 +131,10 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
	struct iscsi_tcp_conn *tcp_conn;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock(&sk->sk_callback_lock);
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}
	tcp_conn = conn->dd_data;
@@ -154,7 +154,7 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
	/* If we had to (atomically) map a highmem page,
	 * unmap it now. */
	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void iscsi_sw_tcp_state_change(struct sock *sk)
@@ -165,10 +165,10 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
	struct iscsi_session *session;
	void (*old_state_change)(struct sock *);

	read_lock(&sk->sk_callback_lock);
	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		read_unlock(&sk->sk_callback_lock);
		read_unlock_bh(&sk->sk_callback_lock);
		return;
	}
	session = conn->session;
@@ -179,7 +179,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
	tcp_sw_conn = tcp_conn->dd_data;
	old_state_change = tcp_sw_conn->old_state_change;

	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);

	old_state_change(sk);
}
+9 −8
Original line number Diff line number Diff line
@@ -600,10 +600,11 @@ static void o2net_set_nn_state(struct o2net_node *nn,
static void o2net_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);
	struct o2net_sock_container *sc;

	read_lock(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		struct o2net_sock_container *sc = sk->sk_user_data;
	read_lock_bh(&sk->sk_callback_lock);
	sc = sk->sk_user_data;
	if (sc) {
		sclog(sc, "data_ready hit\n");
		o2net_set_data_ready_time(sc);
		o2net_sc_queue_work(sc, &sc->sc_rx_work);
@@ -611,7 +612,7 @@ static void o2net_data_ready(struct sock *sk)
	} else {
		ready = sk->sk_data_ready;
	}
	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);

	ready(sk);
}
@@ -622,7 +623,7 @@ static void o2net_state_change(struct sock *sk)
	void (*state_change)(struct sock *sk);
	struct o2net_sock_container *sc;

	read_lock(&sk->sk_callback_lock);
	read_lock_bh(&sk->sk_callback_lock);
	sc = sk->sk_user_data;
	if (sc == NULL) {
		state_change = sk->sk_state_change;
@@ -649,7 +650,7 @@ static void o2net_state_change(struct sock *sk)
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);
	state_change(sk);
}

@@ -2012,7 +2013,7 @@ static void o2net_listen_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	read_lock(&sk->sk_callback_lock);
	read_lock_bh(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (ready == NULL) { /* check for teardown race */
		ready = sk->sk_data_ready;
@@ -2039,7 +2040,7 @@ static void o2net_listen_data_ready(struct sock *sk)
	}

out:
	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);
	if (ready != NULL)
		ready(sk);
}
+2 −2
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	read_lock(&sk->sk_callback_lock);
	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		state_change = sk->sk_state_change;
@@ -69,7 +69,7 @@ void rds_tcp_state_change(struct sock *sk)
			break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);
	state_change(sk);
}

+2 −2
Original line number Diff line number Diff line
@@ -166,7 +166,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)

	rdsdebug("listen data ready sk %p\n", sk);

	read_lock(&sk->sk_callback_lock);
	read_lock_bh(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (!ready) { /* check for teardown race */
		ready = sk->sk_data_ready;
@@ -183,7 +183,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
		rds_tcp_accept_work(sk);

out:
	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);
}

+2 −2
Original line number Diff line number Diff line
@@ -301,7 +301,7 @@ void rds_tcp_data_ready(struct sock *sk)

	rdsdebug("data ready sk %p\n", sk);

	read_lock(&sk->sk_callback_lock);
	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) { /* check for teardown race */
		ready = sk->sk_data_ready;
@@ -315,7 +315,7 @@ void rds_tcp_data_ready(struct sock *sk)
	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
	read_unlock(&sk->sk_callback_lock);
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);
}

Loading