Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aedec080 authored by Sean Hefty's avatar Sean Hefty Committed by Roland Dreier
Browse files

RDMA/cma: Increment port number after close to avoid re-use



Randomize the starting port number and avoid re-using port values
immediately after they are closed.  Instead keep track of the last
port value used and increment it every time a new port number is
assigned, to better replicate other port spaces.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 65e5c026
Loading
Loading
Loading
Loading
+56 −10
Original line number Original line Diff line number Diff line
@@ -71,6 +71,7 @@ static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(udp_ps);
static int next_port;


struct cma_device {
struct cma_device {
	struct list_head	list;
	struct list_head	list;
/*
 * Reserve a specific port number @snum for @id_priv in the port-space idr
 * @ps (one of sdp_ps/tcp_ps/udp_ps seen earlier in this file).
 *
 * Returns 0 on success, -ENOMEM if allocation fails, -EADDRNOTAVAIL if the
 * requested port is already taken, or another negative errno from the idr.
 *
 * NOTE(review): callers appear to serialize via mutex_lock(&lock) in
 * cma_get_port() — confirm; nothing in this function itself locks the idr.
 */
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	/*
	 * Ask the idr for the first free id >= snum.  idr_get_new_above()
	 * can return -EAGAIN when its internal node cache is empty;
	 * idr_pre_get() refills the cache and we retry until either the
	 * insert succeeds or preallocation itself fails.
	 */
	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	/*
	 * "First free id >= snum" landing above snum means snum itself is
	 * in use: the exact port the caller asked for is unavailable.
	 */
	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	/* Undo the idr reservation made above before freeing. */
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

/*
 * Allocate an ephemeral (caller did not request a specific number) port for
 * @id_priv from port space @ps.
 *
 * Ports are handed out starting at the file-scope cursor next_port and the
 * cursor is advanced after each allocation, so recently closed ports are not
 * immediately re-used (mirrors how other port spaces behave).  When the
 * cursor runs past the top of the local port range it wraps to the bottom
 * once; if the wrapped search also exhausts the range, -EADDRNOTAVAIL.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): next_port is shared file-scope state; cma_get_port() takes
 * mutex_lock(&lock) around this call — confirm all callers do the same.
 */
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* Retry on -EAGAIN while idr_pre_get() can refill the idr's cache. */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port > sysctl_local_port_range[1]) {
		/*
		 * Ran off the top of the range.  If we have not already
		 * wrapped, release this out-of-range id, reset the cursor to
		 * the bottom of the range and search again from there.
		 */
		if (next_port != sysctl_local_port_range[0]) {
			idr_remove(ps, port);
			next_port = sysctl_local_port_range[0];
			goto retry;
		}
		/* Already wrapped once: the whole range is in use. */
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	/* Advance the cursor past the port we just took, wrapping at the top. */
	if (port == sysctl_local_port_range[1])
		next_port = sysctl_local_port_range[0];
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	/* Undo the idr reservation before freeing the bind list. */
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
@@ -1811,7 +1853,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)


	mutex_lock(&lock);
	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_port(ps, id_priv, 0);
		ret = cma_alloc_any_port(ps, id_priv);
	else
	else
		ret = cma_use_port(ps, id_priv);
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);
	mutex_unlock(&lock);
@@ -2448,6 +2490,10 @@ static int cma_init(void)
{
{
	int ret;
	int ret;


	get_random_bytes(&next_port, sizeof next_port);
	next_port = (next_port % (sysctl_local_port_range[1] -
				  sysctl_local_port_range[0])) +
		    sysctl_local_port_range[0];
	cma_wq = create_singlethread_workqueue("rdma_cm_wq");
	cma_wq = create_singlethread_workqueue("rdma_cm_wq");
	if (!cma_wq)
	if (!cma_wq)
		return -ENOMEM;
		return -ENOMEM;