
Commit 6fa1f2f0 authored by Doug Ledford

Merge branches 'hfi1' and 'mlx' into k.o/for-4.9-rc

parents 2b16056f 6d931308
drivers/infiniband/core/addr.c: +9 −2
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
 struct resolve_cb_context {
 	struct rdma_dev_addr *addr;
 	struct completion comp;
+	int status;
 };
 
 static void resolve_cb(int status, struct sockaddr *src_addr,
 	     struct rdma_dev_addr *addr, void *context)
 {
-	memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
-				rdma_dev_addr));
+	if (!status)
+		memcpy(((struct resolve_cb_context *)context)->addr,
+		       addr, sizeof(struct rdma_dev_addr));
+	((struct resolve_cb_context *)context)->status = status;
 	complete(&((struct resolve_cb_context *)context)->comp);
 }
 
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,

 	wait_for_completion(&ctx.comp);
 
+	ret = ctx.status;
+	if (ret)
+		return ret;
+
 	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
 	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
 	if (!dev)
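
Note on the addr.c hunks above: resolve_cb() used to copy the resolved address unconditionally and discard the callback's status, so a failed resolution could hand the caller stale bytes. The fix records the status in the completion context, copies the address only on success, and has rdma_addr_find_l2_eth_by_grh() return that status before touching dmac. A minimal userspace sketch of the same pattern, with pthreads standing in for a kernel completion and all names invented for illustration:

#include <pthread.h>
#include <stdio.h>

struct resolve_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	int done;
	int status;    /* like resolve_cb_context.status */
	char addr[64]; /* stands in for struct rdma_dev_addr */
};

/* Callback side: mirrors resolve_cb() after the fix. */
static void resolve_done(struct resolve_ctx *ctx, int status, const char *addr)
{
	pthread_mutex_lock(&ctx->lock);
	if (!status)                         /* copy the result only on success */
		snprintf(ctx->addr, sizeof(ctx->addr), "%s", addr);
	ctx->status = status;                /* always propagate the verdict */
	ctx->done = 1;
	pthread_cond_signal(&ctx->done_cv);  /* like complete(&ctx.comp) */
	pthread_mutex_unlock(&ctx->lock);
}

/* Waiter side: mirrors rdma_addr_find_l2_eth_by_grh() after the fix. */
static int wait_for_resolve(struct resolve_ctx *ctx, char *out, size_t len)
{
	int ret;

	pthread_mutex_lock(&ctx->lock);
	while (!ctx->done)
		pthread_cond_wait(&ctx->done_cv, &ctx->lock);
	ret = ctx->status;
	if (!ret)                            /* consume the address only if ret == 0 */
		snprintf(out, len, "%s", ctx->addr);
	pthread_mutex_unlock(&ctx->lock);
	return ret;                          /* error returned instead of stale data */
}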
drivers/infiniband/core/cm.c: +110 −16
@@ -80,6 +80,8 @@ static struct ib_cm {
 	__be32 random_id_operand;
 	struct list_head timewait_list;
 	struct workqueue_struct *wq;
+	/* Sync on cm change port state */
+	spinlock_t state_lock;
 } cm;
 
 /* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
 	struct ib_mad_agent *mad_agent;
 	struct kobject port_obj;
 	u8 port_num;
+	struct list_head cm_priv_prim_list;
+	struct list_head cm_priv_altr_list;
 	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
 };
 
@@ -241,6 +245,12 @@ struct cm_id_private {
 	u8 service_timeout;
 	u8 target_ack_delay;
 
+	struct list_head prim_list;
+	struct list_head altr_list;
+	/* Indicates that the send port mad is registered and av is set */
+	int prim_send_port_not_ready;
+	int altr_send_port_not_ready;
+
 	struct list_head work_list;
 	atomic_t work_count;
 };
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	struct ib_mad_agent *mad_agent;
 	struct ib_mad_send_buf *m;
 	struct ib_ah *ah;
+	struct cm_av *av;
+	unsigned long flags, flags2;
+	int ret = 0;
 
+	/* don't let the port to be released till the agent is down */
+	spin_lock_irqsave(&cm.state_lock, flags2);
+	spin_lock_irqsave(&cm.lock, flags);
+	if (!cm_id_priv->prim_send_port_not_ready)
+		av = &cm_id_priv->av;
+	else if (!cm_id_priv->altr_send_port_not_ready &&
+		 (cm_id_priv->alt_av.port))
+		av = &cm_id_priv->alt_av;
+	else {
+		pr_info("%s: not valid CM id\n", __func__);
+		ret = -ENODEV;
+		spin_unlock_irqrestore(&cm.lock, flags);
+		goto out;
+	}
+	spin_unlock_irqrestore(&cm.lock, flags);
+	/* Make sure the port haven't released the mad yet */
 	mad_agent = cm_id_priv->av.port->mad_agent;
-	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
-	if (IS_ERR(ah))
-		return PTR_ERR(ah);
+	if (!mad_agent) {
+		pr_info("%s: not a valid MAD agent\n", __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+	if (IS_ERR(ah)) {
+		ret = PTR_ERR(ah);
+		goto out;
+	}
 
 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
-			       cm_id_priv->av.pkey_index,
+			       av->pkey_index,
 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 			       GFP_ATOMIC,
 			       IB_MGMT_BASE_VERSION);
 	if (IS_ERR(m)) {
 		ib_destroy_ah(ah);
-		return PTR_ERR(m);
+		ret = PTR_ERR(m);
+		goto out;
 	}
 
 	/* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	atomic_inc(&cm_id_priv->refcount);
 	m->context[0] = cm_id_priv;
 	*msg = m;
-	return 0;
+
+out:
+	spin_unlock_irqrestore(&cm.state_lock, flags2);
+	return ret;
 }
 
 static int cm_alloc_response_msg(struct cm_port *port,
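
Note on cm_alloc_msg() above: it now picks an address vector only while the relevant ready flag is checked under cm.lock, holds cm.state_lock across the whole allocation so the port cannot release the mad_agent underneath it, and funnels every failure through a single out: label so state_lock is dropped exactly once. A hedged sketch of that shape, with invented names and a pthread mutex standing in for the spinlock:

#include <errno.h>
#include <pthread.h>

struct av { int ready; int handle; };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static int pick_and_use(struct av *prim, struct av *alt, int *out_handle)
{
	struct av *av;
	int ret = 0;

	pthread_mutex_lock(&state_lock);
	if (prim->ready)                /* prefer the primary path */
		av = prim;
	else if (alt->ready)            /* fall back to the alternate */
		av = alt;
	else {
		ret = -ENODEV;          /* both stale: fail cleanly */
		goto out;
	}
	*out_handle = av->handle;       /* safe: still pinned by the lock */
out:
	pthread_mutex_unlock(&state_lock);  /* single exit releases exactly once */
	return ret;
}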
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 			   grh, &av->ah_attr);
 }
 
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+			      struct cm_id_private *cm_id_priv)
 {
 	struct cm_device *cm_dev;
 	struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 			     &av->ah_attr);
 	av->timeout = path->packet_life_time + 1;
 
-	return 0;
+	spin_lock_irqsave(&cm.lock, flags);
+	if (&cm_id_priv->av == av)
+		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+	else if (&cm_id_priv->alt_av == av)
+		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+	else
+		ret = -EINVAL;
+
+	spin_unlock_irqrestore(&cm.lock, flags);
+
+	return ret;
 }
 
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	spin_lock_init(&cm_id_priv->lock);
 	init_completion(&cm_id_priv->comp);
 	INIT_LIST_HEAD(&cm_id_priv->work_list);
+	INIT_LIST_HEAD(&cm_id_priv->prim_list);
+	INIT_LIST_HEAD(&cm_id_priv->altr_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
 	return &cm_id_priv->id;
@@ -892,6 +945,15 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 		break;
 	}
 
+	spin_lock_irq(&cm.lock);
+	if (!list_empty(&cm_id_priv->altr_list) &&
+	    (!cm_id_priv->altr_send_port_not_ready))
+		list_del(&cm_id_priv->altr_list);
+	if (!list_empty(&cm_id_priv->prim_list) &&
+	    (!cm_id_priv->prim_send_port_not_ready))
+		list_del(&cm_id_priv->prim_list);
+	spin_unlock_irq(&cm.lock);
+
 	cm_free_id(cm_id->local_id);
 	cm_deref_id(cm_id_priv);
 	wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+				 cm_id_priv);
 	if (ret)
 		goto error1;
 	if (param->alternate_path) {
 		ret = cm_init_av_by_path(param->alternate_path,
-					 &cm_id_priv->alt_av);
+					 &cm_id_priv->alt_av, cm_id_priv);
 		if (ret)
 			goto error1;
 	}
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
 			dev_put(gid_attr.ndev);
 		}
 		work->path[0].gid_type = gid_attr.gid_type;
-		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+					 cm_id_priv);
 	}
 	if (ret) {
 		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
 		goto rejected;
 	}
 	if (req_msg->alt_local_lid) {
-		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+					 cm_id_priv);
 		if (ret) {
 			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
 				       &work->path[0].sgid,
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+				 cm_id_priv);
 	if (ret)
 		goto out;
 	cm_id_priv->alt_av.timeout =
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
 				work->mad_recv_wc->recv_buf.grh,
 				&cm_id_priv->av);
-	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+			   cm_id_priv);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		return -EINVAL;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
 	if (ret)
 		goto out;
 
@@ -3468,7 +3535,9 @@ static int cm_establish(struct ib_cm_id *cm_id)
 static int cm_migrate(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
+	struct cm_av tmp_av;
 	unsigned long flags;
+	int tmp_send_port_not_ready;
 	int ret = 0;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
 	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
 	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
 		cm_id->lap_state = IB_CM_LAP_IDLE;
+		/* Swap address vector */
+		tmp_av = cm_id_priv->av;
 		cm_id_priv->av = cm_id_priv->alt_av;
+		cm_id_priv->alt_av = tmp_av;
+		/* Swap port send ready state */
+		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
 	} else
 		ret = -EINVAL;
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
 		port->cm_dev = cm_dev;
 		port->port_num = i;
 
+		INIT_LIST_HEAD(&port->cm_priv_prim_list);
+		INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
 		ret = cm_create_port_fs(port);
 		if (ret)
 			goto error1;
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 {
 	struct cm_device *cm_dev = client_data;
 	struct cm_port *port;
+	struct cm_id_private *cm_id_priv;
+	struct ib_mad_agent *cur_mad_agent;
 	struct ib_port_modify port_modify = {
 		.clr_port_cap_mask = IB_PORT_CM_SUP
 	};
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)

 		port = cm_dev->port[i-1];
 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+		/* Mark all the cm_id's as not valid */
+		spin_lock_irq(&cm.lock);
+		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+			cm_id_priv->altr_send_port_not_ready = 1;
+		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+			cm_id_priv->prim_send_port_not_ready = 1;
+		spin_unlock_irq(&cm.lock);
 		/*
 		 * We flush the queue here after the going_down set, this
 		 * verify that no new works will be queued in the recv handler,
 		 * after that we can call the unregister_mad_agent
 		 */
 		flush_workqueue(cm.wq);
-		ib_unregister_mad_agent(port->mad_agent);
+		spin_lock_irq(&cm.state_lock);
+		cur_mad_agent = port->mad_agent;
+		port->mad_agent = NULL;
+		spin_unlock_irq(&cm.state_lock);
+		ib_unregister_mad_agent(cur_mad_agent);
 		cm_remove_port_fs(port);
 	}
 
 	device_unregister(cm_dev->device);
 	kfree(cm_dev);
 }
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
 	INIT_LIST_HEAD(&cm.device_list);
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
+	spin_lock_init(&cm.state_lock);
 	cm.listen_service_table = RB_ROOT;
 	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
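
Note on the cm.c lifetime changes as a whole: each cm_id now registers itself on per-port lists in cm_init_av_by_path(), cm_remove_one() marks those ids stale and unpublishes port->mad_agent under cm.state_lock before unregistering it, and senders re-check both under the same locks, so a send either finds a live agent or fails with -ENODEV instead of dereferencing freed memory. A compact userspace analogue of the unpublish-then-free side (invented names; one mutex stands in for both cm.lock and cm.state_lock):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mad_agent { int id; };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mad_agent *port_agent;  /* like port->mad_agent */
static int send_port_not_ready;       /* like *_send_port_not_ready */

/* Sender: either sees a live agent or fails cleanly. */
static int send_one(void)
{
	int ret = 0;

	pthread_mutex_lock(&state_lock);
	if (send_port_not_ready || !port_agent) {
		ret = -ENODEV;        /* stale id: never touch a freed agent */
		goto out;
	}
	printf("send via agent %d\n", port_agent->id);
out:
	pthread_mutex_unlock(&state_lock);
	return ret;
}

/* Teardown: mirrors the cm_remove_one() sequence. */
static void remove_port(void)
{
	struct mad_agent *cur;

	pthread_mutex_lock(&state_lock);
	send_port_not_ready = 1;      /* mark ids on this port stale */
	cur = port_agent;
	port_agent = NULL;            /* unpublish before freeing */
	pthread_mutex_unlock(&state_lock);

	free(cur);                    /* no sender can reach it anymore */
}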
drivers/infiniband/core/cma.c: +20 −1
@@ -2436,6 +2436,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
 	return 0;
 }
 
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+					   unsigned long supported_gids,
+					   enum ib_gid_type default_gid)
+{
+	if ((network_type == RDMA_NETWORK_IPV4 ||
+	     network_type == RDMA_NETWORK_IPV6) &&
+	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+		return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+	return default_gid;
+}
+
 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 {
 	struct rdma_route *route = &id_priv->id.route;
@@ -2461,6 +2473,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	route->num_paths = 1;
 
 	if (addr->dev_addr.bound_dev_if) {
+		unsigned long supported_gids;
+
 		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
 		if (!ndev) {
 			ret = -ENODEV;
@@ -2484,7 +2498,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)

 		route->path_rec->net = &init_net;
 		route->path_rec->ifindex = ndev->ifindex;
-		route->path_rec->gid_type = id_priv->gid_type;
+		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+							    id_priv->id.port_num);
+		route->path_rec->gid_type =
+			cma_route_gid_type(addr->dev_addr.network,
+					   supported_gids,
+					   id_priv->gid_type);
 	}
 	if (!ndev) {
 		ret = -ENODEV;
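
Note on the cma.c change: route resolution previously stamped the path record with the id's default GID type, which could pin routable IPv4/IPv6 traffic to RoCE v1. cma_route_gid_type() now upgrades such routes to RoCE v2 (IB_GID_TYPE_ROCE_UDP_ENCAP) whenever the port advertises support for it, and keeps the default otherwise. A standalone restatement of that selection logic for experimenting outside the kernel (enum values and the bit test are simplified stand-ins):

#include <assert.h>

enum net_type { NET_IB, NET_ROCE_V1, NET_IPV4, NET_IPV6 };
enum gid_type { GID_ROCE = 0, GID_ROCE_UDP_ENCAP = 1 }; /* v1, v2 */

static enum gid_type route_gid_type(enum net_type net,
				    unsigned long supported_gids,
				    enum gid_type default_gid)
{
	/* Routable traffic on a port that supports RoCE v2 gets the
	 * UDP-encapsulated GID type; everything else keeps the default. */
	if ((net == NET_IPV4 || net == NET_IPV6) &&
	    (supported_gids & (1UL << GID_ROCE_UDP_ENCAP)))
		return GID_ROCE_UDP_ENCAP;

	return default_gid;
}

int main(void)
{
	unsigned long v1_only = 1UL << GID_ROCE;
	unsigned long v1_v2 = v1_only | (1UL << GID_ROCE_UDP_ENCAP);

	assert(route_gid_type(NET_IPV4, v1_v2, GID_ROCE) == GID_ROCE_UDP_ENCAP);
	assert(route_gid_type(NET_IPV4, v1_only, GID_ROCE) == GID_ROCE);  /* fallback */
	assert(route_gid_type(NET_ROCE_V1, v1_v2, GID_ROCE) == GID_ROCE); /* not routable */
	return 0;
}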
drivers/infiniband/core/umem.c: +1 −1
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,

 	cur_base = addr & PAGE_MASK;
 
-	if (npages == 0) {
+	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
 		goto out;
 	}
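
Note on the umem.c change: npages is derived from a user-supplied address and length, and on 64-bit a large enough region yields a page count that no longer fits in the unsigned int consumed by later allocation and pinning helpers; bounding it at UINT_MAX turns that into a clean -EINVAL. A hedged illustration (page-size constants and helper names invented for the sketch):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Page count spanned by a user buffer, like ib_umem_get()'s computation. */
static uint64_t num_pages(uint64_t addr, uint64_t size)
{
	uint64_t first = addr & PAGE_MASK;
	uint64_t last = (addr + size - 1) & PAGE_MASK;

	return ((last - first) >> PAGE_SHIFT) + 1;
}

static int check_region(uint64_t addr, uint64_t size)
{
	uint64_t npages = num_pages(addr, size);

	/* Mirror of the patched test: reject empty and oversized regions. */
	if (npages == 0 || npages > UINT_MAX)
		return -22; /* -EINVAL */
	return 0;
}

int main(void)
{
	printf("%d\n", check_region(0x1000, 4096));       /* 0: one page */
	printf("%d\n", check_region(0x1000, 1ULL << 60)); /* -22: count overflows uint */
	return 0;
}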
drivers/infiniband/core/uverbs_main.c: +2 −5
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 
 		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
-		if (qp != qp->real_qp) {
-			ib_close_qp(qp);
-		} else {
+		if (qp == qp->real_qp)
 			ib_uverbs_detach_umcast(qp, uqp);
-			ib_destroy_qp(qp);
-		}
+		ib_destroy_qp(qp);
 		ib_uverbs_release_uevent(file, &uqp->uevent);
 		kfree(uqp);
 	}
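
Note on the uverbs_main.c change: the old cleanup only closed shared QPs (such as opened XRC target QPs) and skipped the destroy that drops the last reference on the real QP, which the diff suggests leaked the underlying object. Now every uobject goes through ib_destroy_qp(), which handles both cases, closing a QP it does not own rather than destroying it. A hedged sketch of that owned-vs-shared handle pattern (invented names, a plain refcount in place of the kernel's machinery):

#include <stdlib.h>

/* A handle is either the real object itself or a shared reference to it. */
struct qp {
	struct qp *real; /* self if owned, else the shared real QP */
	int refcnt;      /* users of the real object */
};

static struct qp *create_qp(void)
{
	struct qp *q = calloc(1, sizeof(*q));

	q->real = q;
	q->refcnt = 1;
	return q;
}

static struct qp *open_qp(struct qp *real) /* like ib_open_qp() */
{
	struct qp *h = calloc(1, sizeof(*h));

	h->real = real;
	real->refcnt++;
	return h;
}

/* One teardown entry point, like ib_destroy_qp() after the fix: it closes
 * a shared handle and frees the real object only when the last user goes. */
static void destroy_qp(struct qp *qp)
{
	struct qp *real = qp->real;

	if (qp != real)
		free(qp);          /* close the handle... */
	if (--real->refcnt == 0)
		free(real);        /* ...and drop the real QP when unused */
}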