Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 50b464b3 authored by David S. Miller
Browse files

Merge branch 'smc-next'



Ursula Braun says:

====================
patches 2018-05-23

here are more smc-patches for net-next:

Patch 1 fixes an ioctl problem detected by syzbot.

Patch 2 improves smc_lgr_list locking in case of abnormal link
group termination. If you want to receive a version for the net-tree,
please let me know. It would look somewhat different, since the port
terminate code has been moved to smc_core.c on net-next.

Patch 3 enables SMC to deal with urgent data.

Patch 4 is a minor improvement to avoid out-of-sync linkgroups
between 2 peers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8156b0ba 7f58a1ad
Loading
Loading
Loading
Loading
+37 −5
Original line number Diff line number Diff line
@@ -8,8 +8,6 @@
 *
 *  Initial restrictions:
 *    - support for alternate links postponed
 *    - partial support for non-blocking sockets only
 *    - support for urgent data postponed
 *
 *  Copyright IBM Corp. 2016, 2018
 *
@@ -1338,6 +1336,8 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
		}
		if (smc->conn.urg_state == SMC_URG_VALID)
			mask |= EPOLLPRI;

	}
	release_sock(sk);
@@ -1477,10 +1477,13 @@ static int smc_getsockopt(struct socket *sock, int level, int optname,
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	if (smc->use_fallback) {
		if (!smc->clcsock)
			return -EBADF;
@@ -1490,12 +1493,20 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
@@ -1503,8 +1514,29 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_write(&cons,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
				       conn);
			smc_curs_write(&urg,
				       smc_curs_read(&conn->urg_curs, conn),
				       conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		return -ENOIOCTLCMD;
	}
+15 −0
Original line number Diff line number Diff line
@@ -114,6 +114,12 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
	u8				reserved[18];
} __aligned(8);

/* state of urgent (out-of-band) data on an SMC connection; tracked in
 * conn->urg_state and consulted e.g. by poll (EPOLLPRI) and SIOCATMARK
 */
enum smc_urg_state {
	SMC_URG_VALID,			/* data present */
	SMC_URG_NOTYET,			/* data pending */
	SMC_URG_READ			/* data was already read */
};

struct smc_connection {
	struct rb_node		alert_node;
	struct smc_link_group	*lgr;		/* link group of connection */
@@ -160,6 +166,15 @@ struct smc_connection {
	union smc_host_cursor	rx_curs_confirmed; /* confirmed to peer
						    * source of snd_una ?
						    */
	union smc_host_cursor	urg_curs;	/* points at urgent byte */
	enum smc_urg_state	urg_state;
	bool			urg_tx_pend;	/* urgent data staged */
	bool			urg_rx_skip_pend;
						/* indicate urgent oob data
						 * read, but previous regular
						 * data still pending
						 */
	char			urg_rx_byte;	/* urgent byte */
	atomic_t		bytes_to_rcv;	/* arrived data,
						 * not yet received
						 */
+41 −3
Original line number Diff line number Diff line
@@ -164,6 +164,28 @@ static inline bool smc_cdc_before(u16 seq1, u16 seq2)
	return (s16)(seq1 - seq2) < 0;
}

/* Record the position of a newly arrived urgent byte and latch its value.
 * Called from the CDC receive path when the peer's producer flags indicate
 * that the just-received data includes urgent data.
 * @smc:       receiving SMC socket
 * @diff_prod: in/out count of newly arrived bytes; decremented by one when
 *             the urgent byte is delivered out-of-band (no SOCK_URGINLINE),
 *             so it is not accounted as regular receive data
 */
static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
					    int *diff_prod)
{
	struct smc_connection *conn = &smc->conn;
	char *base;

	/* new data included urgent business */
	smc_curs_write(&conn->urg_curs,
		       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
		       conn);
	conn->urg_state = SMC_URG_VALID;
	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
		/* we'll skip the urgent byte, so don't account for it */
		(*diff_prod)--;
	/* the urgent byte is the one just before the producer cursor;
	 * count == 0 means the cursor has wrapped, so the byte sits at
	 * the very end of the receive buffer (RMB)
	 */
	base = (char *)conn->rmb_desc->cpu_addr;
	if (conn->urg_curs.count)
		conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
	else
		conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
	/* notify the socket owner of the urgent data */
	sk_send_sigurg(&smc->sk);
}

static void smc_cdc_msg_recv_action(struct smc_sock *smc,
				    struct smc_cdc_msg *cdc)
{
@@ -194,15 +216,25 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
	diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
				  &conn->local_rx_ctrl.prod);
	if (diff_prod) {
		if (conn->local_rx_ctrl.prod_flags.urg_data_present)
			smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
		/* bytes_to_rcv is decreased in smc_recvmsg */
		smp_mb__before_atomic();
		atomic_add(diff_prod, &conn->bytes_to_rcv);
		/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
		smp_mb__after_atomic();
		smc->sk.sk_data_ready(&smc->sk);
	} else if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
		   (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req)) {
		smc->sk.sk_data_ready(&smc->sk);
	} else {
		if (conn->local_rx_ctrl.prod_flags.write_blocked ||
		    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
		    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
			if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
				conn->urg_state = SMC_URG_NOTYET;
			/* force immediate tx of current consumer cursor, but
			 * under send_lock to guarantee arrival in seqno-order
			 */
			smc_tx_sndbuf_nonempty(conn);
		}
	}

	/* piggy backed tx info */
@@ -212,6 +244,12 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
		/* trigger socket release if connection closed */
		smc_close_wake_tx_prepared(smc);
	}
	if (diff_cons && conn->urg_tx_pend &&
	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
		/* urg data confirmed by peer, indicate we're ready for more */
		conn->urg_tx_pend = false;
		smc->sk.sk_write_space(&smc->sk);
	}

	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		smc->sk.sk_err = ECONNRESET;
+13 −0
Original line number Diff line number Diff line
@@ -146,6 +146,19 @@ static inline int smc_curs_diff(unsigned int size,
	return max_t(int, 0, (new->count - old->count));
}

/* calculate cursor difference between old and new - returns negative
 * value in case old > new
 */
static inline int smc_curs_comp(unsigned int size,
				union smc_host_cursor *old,
				union smc_host_cursor *new)
{
	/* decide direction first, then reuse smc_curs_diff() for magnitude */
	if (old->wrap < new->wrap)
		return smc_curs_diff(size, old, new);
	if (old->wrap > new->wrap)
		return -smc_curs_diff(size, new, old);
	/* same wrap: ordering is determined by the byte counts alone */
	if (old->count > new->count)
		return -smc_curs_diff(size, new, old);
	return smc_curs_diff(size, old, new);
}

static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
					  union smc_host_cursor *local,
					  struct smc_connection *conn)
+15 −4
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
@@ -346,7 +346,7 @@ void smc_lgr_forget(struct smc_link_group *lgr)
}

/* terminate linkgroup abnormally */
void smc_lgr_terminate(struct smc_link_group *lgr)
static void __smc_lgr_terminate(struct smc_link_group *lgr)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
@@ -355,7 +355,8 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
	if (lgr->terminating)
		return;	/* lgr already terminating */
	lgr->terminating = 1;
	smc_lgr_forget(lgr);
	if (!list_empty(&lgr->list)) /* forget lgr */
		list_del_init(&lgr->list);
	smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

	write_lock_bh(&lgr->conns_lock);
@@ -377,16 +378,25 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
	smc_lgr_schedule_free_work(lgr);
}

/* terminate a link group abnormally: locked wrapper that takes the global
 * link group list lock and delegates the actual work to
 * __smc_lgr_terminate(), which expects the lock to be held
 */
void smc_lgr_terminate(struct smc_link_group *lgr)
{
	spin_lock_bh(&smc_lgr_list.lock);
	__smc_lgr_terminate(lgr);
	spin_unlock_bh(&smc_lgr_list.lock);
}

/* Called when an IB port is terminated: abnormally terminate every link
 * group that uses this device/port combination.
 * The link group list lock is held across the whole walk, so each matching
 * entry must be terminated via the lockless __smc_lgr_terminate() variant
 * (calling smc_lgr_terminate() here would deadlock on smc_lgr_list.lock).
 * @smcibdev: IB device whose port went away
 * @ibport:   port number on that device
 */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
			__smc_lgr_terminate(lgr);
	}
	spin_unlock_bh(&smc_lgr_list.lock);
}

/* Determine vlan of internal TCP socket.
@@ -534,6 +544,7 @@ int smc_conn_create(struct smc_sock *smc,
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif
Loading