
Commit fc81a069 authored by Doug Ledford

Merge branch 'k.o/for-4.3-v1' into k.o/for-4.4

Pick up the late fixes from the 4.3 cycle so we have them in our
next branch.
parents 070b3997 0ca81a28
+1 −1
@@ -508,12 +508,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	mutex_lock(&table->lock);
	ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);

	/* Coudn't find default GID location */
	WARN_ON(ix < 0);

	mutex_lock(&table->lock);
	if (!__ib_cache_gid_get(ib_dev, port, ix,
				&current_gid, &current_gid_attr) &&
	    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
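
A note on the hunk above: the single-line move takes table->lock before find_gid() instead of after it, so the index returned by the lookup cannot go stale before it is used. A minimal, self-contained sketch of that lookup-and-update-under-one-lock rule (demo_table and its fields are illustrative names, not from ib_cache):

#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_table {
	struct mutex  lock;
	unsigned int  nr;
	unsigned long key[16];
	unsigned long val[16];
};

/* Find `key` and update it in one critical section.  An index computed
 * before taking ->lock could point at a different entry (or past the end
 * of the table) by the time it is dereferenced. */
static int demo_table_set(struct demo_table *t, unsigned long key,
			  unsigned long val)
{
	unsigned int ix;
	int ret = -ENOENT;

	mutex_lock(&t->lock);
	for (ix = 0; ix < t->nr; ix++) {
		if (t->key[ix] == key) {
			t->val[ix] = val;	/* still under the same lock */
			ret = 0;
			break;
		}
	}
	mutex_unlock(&t->lock);
	return ret;
}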
+9 −1
@@ -835,6 +835,11 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
@@ -3172,7 +3177,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;
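
Both hunks in this file guard rb_erase() with RB_EMPTY_NODE() and clear the node again after erasing it, so whichever of the two teardown paths runs second sees the node as already unlinked instead of erasing it a second time. A minimal sketch of that idempotent-unlink pattern, using illustrative names (demo_node, demo_tree, demo_lock) rather than the ib_cm ones:

#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct demo_node {
	struct rb_node rb;
	unsigned long  key;
};

static struct rb_root demo_tree = RB_ROOT;
static DEFINE_SPINLOCK(demo_lock);

/* Nodes must start out marked "not in a tree" for the check to work. */
static void demo_node_init(struct demo_node *n)
{
	RB_CLEAR_NODE(&n->rb);
}

/* Safe to call from more than one teardown path: only the first caller
 * actually erases the node; later callers see RB_EMPTY_NODE() and skip. */
static void demo_node_unlink(struct demo_node *n)
{
	spin_lock(&demo_lock);
	if (!RB_EMPTY_NODE(&n->rb)) {
		rb_erase(&n->rb, &demo_tree);
		RB_CLEAR_NODE(&n->rb);
	}
	spin_unlock(&demo_lock);
}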

+43 −17
@@ -1067,14 +1067,14 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id	= req_param->primary_path->service_id;
		req->pkey	= req_param->bth_pkey;
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->bth_pkey;
		req->pkey	= sidr_param->pkey;
		break;
	default:
		return -EINVAL;
@@ -1232,14 +1232,32 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,
	return true;
}

static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
	enum rdma_transport_type transport =
		rdma_node_get_transport(device->node_type);

	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return cma_protocol_roce_dev_port(device, port_num);
}

static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
			      const struct net_device *net_dev)
{
	const struct rdma_addr *addr = &id_priv->id.route.addr;

	if (!net_dev)
		/* This request is an AF_IB request */
		return addr->src_addr.ss_family == AF_IB;
		/* This request is an AF_IB request or a RoCE request */
		return addr->src_addr.ss_family == AF_IB ||
		       cma_protocol_roce(&id_priv->id);

	return !addr->dev_addr.bound_dev_if ||
	       (net_eq(dev_net(net_dev), &init_net) &&
@@ -1294,6 +1312,10 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else if (cma_protocol_roce_dev_port(req.device, req.port)) {
			/* TODO find the net dev matching the request parameters
			 * through the RoCE GID table */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
@@ -1302,7 +1324,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
	bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
				cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
	if (IS_ERR(id_priv)) {
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}
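
The `if (IS_ERR(id_priv) && *net_dev)` change above matters because AF_IB requests (and, with this series, RoCE requests) legitimately resolve to a NULL net_device with no reference held, and dev_put() must not be called in that case. A sketch of the resulting hold/put discipline; demo_release_ndev is a hypothetical helper, not part of the patch:

#include <linux/netdevice.h>

/* Drop a netdev reference only if one is actually held; callers that never
 * resolved a net_device (AF_IB, RoCE) reach here with *ndev == NULL. */
static void demo_release_ndev(struct net_device **ndev)
{
	if (*ndev) {
		dev_put(*ndev);
		*ndev = NULL;
	}
}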
@@ -1593,11 +1615,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
		if (ret)
			goto err;
	} else {
		/* An AF_IB connection */
		WARN_ON_ONCE(ss_family != AF_IB);

		cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv),
				 &rt->addr.dev_addr);
		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
			if (ret)
				goto err;
		}
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

@@ -1635,13 +1662,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
		if (ret)
			goto err;
	} else {
		/* An AF_IB connection */
		WARN_ON_ONCE(ss_family != AF_IB);

		if (!cma_any_addr(cma_src_addr(id_priv)))
			cma_translate_ib((struct sockaddr_ib *)
						cma_src_addr(id_priv),
		if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv),
						 &id->route.addr.dev_addr);
			if (ret)
				goto err;
		}
	}

	id_priv->state = RDMA_CM_CONNECT;
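
For reference, the cma_protocol_roce_dev_port() helper added in this file treats a port as RoCE when its link layer is Ethernet but its transport is IB. A standalone sketch applying the same test to every port of a device; demo_log_roce_ports is an illustrative helper, while the rdma_* calls are the 4.x-era in-kernel APIs already used in the hunk above:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void demo_log_roce_ports(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device);
	     port <= rdma_end_port(device); port++) {
		bool is_roce =
			rdma_port_get_link_layer(device, port) ==
				IB_LINK_LAYER_ETHERNET &&
			rdma_node_get_transport(device->node_type) ==
				RDMA_TRANSPORT_IB;

		pr_info("%s: port %u %s RoCE\n",
			device->name, port, is_roce ? "is" : "is not");
	}
}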
+27 −8
@@ -250,25 +250,44 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct in_device *in_dev;
	struct sin_list {
		struct list_head	list;
		struct sockaddr_in	ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in_dev = in_dev_get(ndev);
	if (!in_dev)
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	for_ifa(in_dev) {
		struct sockaddr_in ip;
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		ip.sin_family = AF_INET;
		ip.sin_addr.s_addr = ifa->ifa_address;
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&ip);
		if (!entry) {
			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
			continue;
		}
		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}
	endfor_ifa(in_dev);
	rcu_read_unlock();

	in_dev_put(in_dev);
	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}

static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
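
The rewrite of enum_netdev_ipv4_ips() above stops doing GID updates while walking the ifa list: the list is now traversed under rcu_read_lock() (the old in_dev_get() walk did not protect the individual entries against concurrent address removal), the addresses are copied into a private list with GFP_ATOMIC, and update_gid_ip(), which may sleep, runs only after rcu_read_unlock(). A self-contained sketch of that collect-under-RCU-then-process pattern, using hypothetical demo_* names in place of the roce_gid_mgmt ones:

#include <linux/in.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct demo_addr {
	struct list_head   list;
	struct sockaddr_in ip;
};

/* Stand-in for the sleeping per-address work (update_gid_ip() above). */
static void demo_handle_addr(const struct sockaddr_in *ip)
{
	pr_info("would update GID entry for %pI4\n", &ip->sin_addr.s_addr);
}

static void demo_enum_ipv4(struct net_device *ndev)
{
	struct demo_addr *entry, *tmp;
	struct in_device *in_dev;
	LIST_HEAD(addrs);

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}
	/* Atomic context: only copy the addresses, never sleep in here. */
	for_ifa(in_dev) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			continue;
		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &addrs);
	}
	endfor_ifa(in_dev);
	rcu_read_unlock();

	/* The work that may sleep happens outside the RCU read-side section. */
	list_for_each_entry_safe(entry, tmp, &addrs, list) {
		demo_handle_addr(&entry->ip);
		list_del(&entry->list);
		kfree(entry);
	}
}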
+6 −1
@@ -1624,11 +1624,16 @@ static int ucma_open(struct inode *inode, struct file *filp)
	if (!file)
		return -ENOMEM;

	file->close_wq = create_singlethread_workqueue("ucma_close_id");
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);
	file->close_wq = create_singlethread_workqueue("ucma_close_id");

	filp->private_data = file;
	file->filp = filp;
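
The ucma_open() change above both moves create_singlethread_workqueue() up and, more importantly, checks its result: workqueue creation can fail, and the old code stored the unchecked pointer, so a failure would only surface later when close work was queued on it. A sketch of the open/release pairing under that rule; demo_file, demo_open and demo_release are illustrative names, not the ucma ones:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_file {
	struct workqueue_struct *close_wq;
	struct mutex             mut;
};

static int demo_open(struct inode *inode, struct file *filp)
{
	struct demo_file *file;

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	/* Can fail; bail out early so nothing ever queues on a NULL wq. */
	file->close_wq = create_singlethread_workqueue("demo_close");
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	mutex_init(&file->mut);
	filp->private_data = file;
	return 0;
}

static int demo_release(struct inode *inode, struct file *filp)
{
	struct demo_file *file = filp->private_data;

	flush_workqueue(file->close_wq);	/* wait for queued close work */
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}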