Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f221dcd9 authored by David S. Miller
Browse files

Merge branch 'net-smc-next'



Ursula Braun says:

====================
net/smc: patches for net-next

here are some patches for net/smc. Most important are
improvements for socket closing.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents c02b7a91 2c9c1682
Loading
Loading
Loading
Loading
+15 −8
Original line number Diff line number Diff line
@@ -147,7 +147,6 @@ static int smc_release(struct socket *sock)
		schedule_delayed_work(&smc->sock_put_work,
				      SMC_CLOSE_SOCK_PUT_DELAY);
	}
	sk->sk_prot->unhash(sk);
	release_sock(sk);

	sock_put(sk);
@@ -451,6 +450,9 @@ static int smc_connect_rdma(struct smc_sock *smc)
		goto decline_rdma_unlock;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
@@ -477,7 +479,6 @@ static int smc_connect_rdma(struct smc_sock *smc)

	mutex_unlock(&smc_create_lgr_pending);
	smc_tx_init(smc);
	smc_rx_init(smc);

out_connected:
	smc_copy_sock_settings_to_clc(smc);
@@ -637,7 +638,8 @@ struct sock *smc_accept_dequeue(struct sock *parent,

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			/* tbd in follow-on patch: close this sock */
			new_sk->sk_prot->unhash(new_sk);
			sock_put(new_sk);
			continue;
		}
		if (new_sock)
@@ -657,8 +659,13 @@ void smc_close_non_accepted(struct sock *sk)
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (!smc->use_fallback)
	if (smc->use_fallback) {
		sk->sk_state = SMC_CLOSED;
	} else {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		struct socket *tcp;

@@ -666,11 +673,9 @@ void smc_close_non_accepted(struct sock *sk)
		smc->clcsock = NULL;
		sock_release(tcp);
	}
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_shutdown |= SHUTDOWN_MASK;
	if (smc->use_fallback) {
		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
	} else {
	} else if (sk->sk_state == SMC_CLOSED) {
		smc_conn_free(&smc->conn);
		schedule_delayed_work(&smc->sock_put_work,
				      SMC_CLOSE_SOCK_PUT_DELAY);
@@ -800,6 +805,9 @@ static void smc_listen_work(struct work_struct *work)
		goto decline_rdma;
	}

	smc_close_init(new_smc);
	smc_rx_init(new_smc);

	rc = smc_clc_send_accept(new_smc, local_contact);
	if (rc)
		goto out_err;
@@ -839,7 +847,6 @@ static void smc_listen_work(struct work_struct *work)
	}

	smc_tx_init(new_smc);
	smc_rx_init(new_smc);

out_connected:
	sk_refcnt_debug_inc(newsmcsk);
+1 −0
Original line number Diff line number Diff line
@@ -164,6 +164,7 @@ struct smc_connection {
#ifndef KERNEL_HAS_ATOMIC64
	spinlock_t		acurs_lock;	/* protect cursors */
#endif
	struct work_struct	close_work;	/* peer sent some closing */
};

struct smc_sock {				/* smc sock container */
+7 −4
Original line number Diff line number Diff line
@@ -217,8 +217,13 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
		smc->sk.sk_err = ECONNRESET;
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	}
	if (smc_cdc_rxed_any_close_or_senddone(conn))
		smc_close_passive_received(smc);
	if (smc_cdc_rxed_any_close_or_senddone(conn)) {
		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
		if (smc->clcsock && smc->clcsock->sk)
			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
		sock_set_flag(&smc->sk, SOCK_DONE);
		schedule_work(&conn->close_work);
	}

	/* piggy backed tx info */
	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
@@ -228,8 +233,6 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
		smc_close_wake_tx_prepared(smc);
	}

	/* subsequent patch: trigger socket release if connection closed */

	/* socket connected but not accepted */
	if (!smc->sk.sk_socket)
		return;
+49 −25
Original line number Diff line number Diff line
@@ -117,7 +117,6 @@ void smc_close_active_abort(struct smc_sock *smc)
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;

	bh_lock_sock(&smc->sk);
	smc->sk.sk_err = ECONNABORTED;
	if (smc->clcsock && smc->clcsock->sk) {
		smc->clcsock->sk->sk_err = ECONNABORTED;
@@ -125,6 +124,7 @@ void smc_close_active_abort(struct smc_sock *smc)
	}
	switch (smc->sk.sk_state) {
	case SMC_INIT:
	case SMC_ACTIVE:
		smc->sk.sk_state = SMC_PEERABORTWAIT;
		break;
	case SMC_APPCLOSEWAIT1:
@@ -161,10 +161,15 @@ void smc_close_active_abort(struct smc_sock *smc)
	}

	sock_set_flag(&smc->sk, SOCK_DEAD);
	bh_unlock_sock(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

static inline bool smc_close_sent_any_close(struct smc_connection *conn)
{
	return conn->local_tx_ctrl.conn_state_flags.peer_conn_abort ||
	       conn->local_tx_ctrl.conn_state_flags.peer_conn_closed;
}

int smc_close_active(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
@@ -185,8 +190,7 @@ int smc_close_active(struct smc_sock *smc)
	case SMC_INIT:
		sk->sk_state = SMC_CLOSED;
		if (smc->smc_listen_work.func)
			flush_work(&smc->smc_listen_work);
		sock_put(sk);
			cancel_work_sync(&smc->smc_listen_work);
		break;
	case SMC_LISTEN:
		sk->sk_state = SMC_CLOSED;
@@ -198,7 +202,7 @@ int smc_close_active(struct smc_sock *smc)
		}
		release_sock(sk);
		smc_close_cleanup_listen(sk);
		flush_work(&smc->tcp_listen_work);
		cancel_work_sync(&smc->smc_listen_work);
		lock_sock(sk);
		break;
	case SMC_ACTIVE:
@@ -218,7 +222,7 @@ int smc_close_active(struct smc_sock *smc)
	case SMC_APPFINCLOSEWAIT:
		/* socket already shutdown wr or both (active close) */
		if (txflags->peer_done_writing &&
		    !txflags->peer_conn_closed) {
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
		}
@@ -248,6 +252,13 @@ int smc_close_active(struct smc_sock *smc)
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
		}
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PEERFINCLOSEWAIT:
		/* peer sending PeerConnectionClosed will cause transition */
		break;
@@ -285,7 +296,7 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !txflags->peer_conn_closed) {
		    !smc_close_sent_any_close(&smc->conn)) {
			/* just shutdown, but not yet closed locally */
			smc_close_abort(&smc->conn);
			sk->sk_state = SMC_PROCESSABORT;
@@ -306,22 +317,27 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)

/* Some kind of closing has been received: peer_conn_closed, peer_conn_abort,
 * or peer_done_writing.
 * Called under tasklet context.
 */
void smc_close_passive_received(struct smc_sock *smc)
static void smc_close_passive_work(struct work_struct *work)
{
	struct smc_cdc_conn_state_flags *rxflags =
		&smc->conn.local_rx_ctrl.conn_state_flags;
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   close_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_cdc_conn_state_flags *rxflags;
	struct sock *sk = &smc->sk;
	int old_state;

	sk->sk_shutdown |= RCV_SHUTDOWN;
	if (smc->clcsock && smc->clcsock->sk)
		smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(&smc->sk, SOCK_DONE);

	lock_sock(&smc->sk);
	old_state = sk->sk_state;

	if (!conn->alert_token_local) {
		/* abnormal termination */
		smc_close_active_abort(smc);
		goto wakeup;
	}

	rxflags = &smc->conn.local_rx_ctrl.conn_state_flags;
	if (rxflags->peer_conn_abort) {
		smc_close_passive_abort_received(smc);
		goto wakeup;
@@ -331,7 +347,7 @@ void smc_close_passive_received(struct smc_sock *smc)
	case SMC_INIT:
		if (atomic_read(&smc->conn.bytes_to_rcv) ||
		    (rxflags->peer_done_writing &&
		     !rxflags->peer_conn_closed))
		     !smc_cdc_rxed_any_close(conn)))
			sk->sk_state = SMC_APPCLOSEWAIT1;
		else
			sk->sk_state = SMC_CLOSED;
@@ -348,7 +364,7 @@ void smc_close_passive_received(struct smc_sock *smc)
		if (!smc_cdc_rxed_any_close(&smc->conn))
			break;
		if (sock_flag(sk, SOCK_DEAD) &&
		    (sk->sk_shutdown == SHUTDOWN_MASK)) {
		    smc_close_sent_any_close(conn)) {
			/* smc_release has already been called locally */
			sk->sk_state = SMC_CLOSED;
		} else {
@@ -367,18 +383,20 @@ void smc_close_passive_received(struct smc_sock *smc)
	}

wakeup:
	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */
	sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */

	if (old_state != sk->sk_state) {
		sk->sk_state_change(sk);
		if ((sk->sk_state == SMC_CLOSED) &&
	    (sock_flag(sk, SOCK_DEAD) || (old_state == SMC_INIT))) {
		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
			smc_conn_free(&smc->conn);
			schedule_delayed_work(&smc->sock_put_work,
					      SMC_CLOSE_SOCK_PUT_DELAY);
		}
	}
	release_sock(&smc->sk);
}

void smc_close_sock_put_work(struct work_struct *work)
{
@@ -442,3 +460,9 @@ int smc_close_shutdown_write(struct smc_sock *smc)
		sk->sk_state_change(&smc->sk);
	return rc;
}

/* Initialize close properties on connection establishment.
 * Hooks up conn.close_work to smc_close_passive_work() so that peer
 * closing indications (the work is scheduled from CDC message receive
 * handling) are processed in work-queue context rather than inline.
 */
void smc_close_init(struct smc_sock *smc)
{
	INIT_WORK(&smc->conn.close_work, smc_close_passive_work);
}
+1 −1
Original line number Diff line number Diff line
@@ -21,8 +21,8 @@
void smc_close_wake_tx_prepared(struct smc_sock *smc);
void smc_close_active_abort(struct smc_sock *smc);
int smc_close_active(struct smc_sock *smc);
void smc_close_passive_received(struct smc_sock *smc);
void smc_close_sock_put_work(struct work_struct *work);
int smc_close_shutdown_write(struct smc_sock *smc);
void smc_close_init(struct smc_sock *smc);

#endif /* SMC_CLOSE_H */
Loading