Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 77ec3a0e authored by David S. Miller
Browse files

Merge branch 'net-smc-small-features'



Ursula Braun says:

====================
net/smc: small features 2018/04/30

here are 4 smc patches for net-next covering small new features
in different areas:
   * link health check
   * diagnostics for IPv6 smc sockets
   * ioctl
   * improvement for vlan determination

v2 changes:
   * better title
   * patch 2 - remove compile problem for disabled CONFIG_IPV6
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents e90c1a10 cb9d43f6
Loading
Loading
Loading
Loading
+34 −5
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include "smc.h"
#include "smc_clc.h"
@@ -294,6 +295,7 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)

static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
@@ -353,7 +355,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	link->state = SMC_LNK_ACTIVE;
	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}
@@ -715,6 +717,7 @@ void smc_close_non_accepted(struct sock *sk)

static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
@@ -769,7 +772,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
		return rc;
	}

	link->state = SMC_LNK_ACTIVE;
	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}
@@ -1387,12 +1390,38 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback)
	if (smc->use_fallback) {
		if (!smc->clcsock)
			return -EBADF;
		return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
	else
		return sock_no_ioctl(sock, cmd, arg);
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		answ = smc->conn.sndbuf_size -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		answ = smc_tx_prepared_sends(&smc->conn);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}

static ssize_t smc_sendpage(struct socket *sock, struct page *page,
+25 −3
Original line number Diff line number Diff line
@@ -310,6 +310,7 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
/* remove a link group */
void smc_lgr_free(struct smc_link_group *lgr)
{
	smc_llc_link_flush(&lgr->lnk[SMC_SINGLE_LINK]);
	smc_lgr_free_bufs(lgr);
	smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
	kfree(lgr);
@@ -332,6 +333,7 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
	struct rb_node *node;

	smc_lgr_forget(lgr);
	smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

	write_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
@@ -358,7 +360,8 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	int rc = 0;
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	*vlan_id = 0;
	if (!dst) {
@@ -370,8 +373,27 @@ static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
		goto out_rel;
	}

	if (is_vlan_dev(dst->dev))
		*vlan_id = vlan_dev_vlan_id(dst->dev);
	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		*vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	rtnl_lock();
	nest_lvl = dev_get_nest_level(ndev);
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			*vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
+4 −0
Original line number Diff line number Diff line
@@ -79,6 +79,7 @@ struct smc_link {
	dma_addr_t		wr_rx_dma_addr;	/* DMA address of wr_rx_bufs */
	u64			wr_rx_id;	/* seq # of last recv WR */
	u32			wr_rx_cnt;	/* number of WR recv buffers */
	unsigned long		wr_rx_tstamp;	/* jiffies when last buf rx */

	struct ib_reg_wr	wr_reg;		/* WR register memory region */
	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */
@@ -101,6 +102,9 @@ struct smc_link {
	int			llc_confirm_resp_rc; /* rc from conf_resp msg */
	struct completion	llc_add;	/* wait for rx of add link */
	struct completion	llc_add_resp;	/* wait for rx of add link rsp*/
	struct delayed_work	llc_testlink_wrk; /* testlink worker */
	struct completion	llc_testlink_resp; /* wait for rx of testlink */
	int			llc_testlink_time; /* testlink interval */
};

/* For now we just allow one parallel link per link group. The SMC protocol
+30 −9
Original line number Diff line number Diff line
@@ -38,17 +38,27 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	r->diag_family = sk->sk_family;
	if (!smc->clcsock)
		return;
	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	if (sk->sk_protocol == SMCPROTO_SMC) {
		r->diag_family = PF_INET;
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
		r->diag_family = PF_INET6;
		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
		       sizeof(smc->clcsock->sk->sk_v6_daddr));
#endif
	}
}

static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
@@ -153,7 +163,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
	return -EMSGSIZE;
}

static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *bc = NULL;
@@ -161,8 +172,8 @@ static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
	struct sock *sk;
	int rc = 0;

	read_lock(&smc_proto.h.smc_hash->lock);
	head = &smc_proto.h.smc_hash->ht;
	read_lock(&prot->h.smc_hash->lock);
	head = &prot->h.smc_hash->ht;
	if (hlist_empty(head))
		goto out;

@@ -175,7 +186,17 @@ static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
	}

out:
	read_unlock(&smc_proto.h.smc_hash->lock);
	read_unlock(&prot->h.smc_hash->lock);
	return rc;
}

static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int rc = 0;

	rc = smc_diag_dump_proto(&smc_proto, skb, cb);
	if (!rc)
		rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
	return rc;
}

+61 −1
Original line number Diff line number Diff line
@@ -397,7 +397,8 @@ static void smc_llc_rx_test_link(struct smc_link *link,
				 struct smc_llc_msg_test_link *llc)
{
	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
		/* unused as long as we don't send this type of msg */
		if (link->state == SMC_LNK_ACTIVE)
			complete(&link->llc_testlink_resp);
	} else {
		smc_llc_send_test_link(link, llc->user_data, SMC_LLC_RESP);
	}
@@ -502,6 +503,65 @@ static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
	}
}

/***************************** worker ****************************************/

/* Periodic link health check, run as a delayed work item.
 * Sends an LLC TEST LINK request when no buffer has been received on the
 * link for llc_testlink_time jiffies, and terminates the whole link group
 * if the peer does not answer within SMC_LLC_WAIT_TIME.
 */
static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	struct smc_link_group *lgr;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
	if (link->state != SMC_LNK_ACTIVE)
		return;		/* don't reschedule worker */
	/* recent rx traffic already proves the link alive: skip the probe
	 * and just sleep until the idle period would expire
	 */
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data, SMC_LLC_REQ);
	/* receive TEST LINK response over RoCE fabric */
	/* NOTE(review): interruptible wait inside a workqueue worker —
	 * kworkers normally have no pending signals, but a non-interruptible
	 * timeout wait would be the safer choice; confirm intent.
	 */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (rc <= 0) {
		/* timeout or interruption: peer is unresponsive, give up */
		smc_lgr_terminate(lgr);
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}

/* Mark a link active and arm the periodic TEST LINK health check.
 * A testlink_time of 0 disables the periodic probing; otherwise the
 * interval is given in seconds and converted to jiffies here.
 */
void smc_llc_link_active(struct smc_link *link, int testlink_time)
{
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	link->state = SMC_LNK_ACTIVE;
	if (!testlink_time)
		return;
	link->llc_testlink_time = testlink_time * HZ;
	schedule_delayed_work(&link->llc_testlink_wrk, link->llc_testlink_time);
}

/* Mark a link inactive and stop the health-check worker.
 * Called in tasklet context, which must not sleep — hence the
 * non-synchronous cancel_delayed_work(); a concurrently running
 * worker instance may still finish (it sees SMC_LNK_INACTIVE and
 * will not reschedule itself).
 */
void smc_llc_link_inactive(struct smc_link *link)
{
	link->state = SMC_LNK_INACTIVE;
	cancel_delayed_work(&link->llc_testlink_wrk);
}

/* Synchronously stop the health-check worker.
 * Called in worker context, where sleeping is allowed, so the _sync
 * variant can be used; on return no testlink worker instance is running,
 * making it safe for the caller to free the link group.
 */
void smc_llc_link_flush(struct smc_link *link)
{
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
}

/***************************** init, exit, misc ******************************/

static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
Loading