Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aebe9bb8 authored by Linus Torvalds
Browse files
Pull rdma fixes from Doug Ledford:
 "This is the second batch of queued up rdma patches for this rc cycle.

  There isn't anything really major in here.  It's passed 0day,
  linux-next, and local testing across a wide variety of hardware.
  There are still a few known issues to be tracked down, but this should
  amount to the vast majority of the rdma RC fixes.

  Round two of 4.7 rc fixes:

   - A couple minor fixes to the rdma core
   - Multiple minor fixes to hfi1
   - Multiple minor fixes to mlx4/mlx5
   - A few minor fixes to i40iw"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (31 commits)
  IB/srpt: Reduce QP buffer size
  i40iw: Enable level-1 PBL for fast memory registration
  i40iw: Return correct max_fast_reg_page_list_len
  i40iw: Correct status check on i40iw_get_pble
  i40iw: Correct CQ arming
  IB/rdmavt: Correct qp_priv_alloc() return value test
  IB/hfi1: Don't zero out qp->s_ack_queue in rvt_reset_qp
  IB/hfi1: Fix deadlock with txreq allocation slow path
  IB/mlx4: Prevent cross page boundary allocation
  IB/mlx4: Fix memory leak if QP creation failed
  IB/mlx4: Verify port number in flow steering create flow
  IB/mlx4: Fix error flow when sending mads under SRIOV
  IB/mlx4: Fix the SQ size of an RC QP
  IB/mlx5: Fix wrong naming of port_rcv_data counter
  IB/mlx5: Fix post send fence logic
  IB/uverbs: Initialize ib_qp_init_attr with zeros
  IB/core: Fix false search of the IB_SA_WELL_KNOWN_GUID
  IB/core: Fix RoCE v1 multicast join logic issue
  IB/core: Fix no default GIDs when netdevice reregisters
  IB/hfi1: Send a pkey change event on driver pkey update
  ...
parents 3fb5e59c 9903fd13
Loading
Loading
Loading
Loading
+3 −1
Original line number Original line Diff line number Diff line
@@ -411,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,


	for (ix = 0; ix < table->sz; ix++)
	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
			if (!del_gid(ib_dev, port, table, ix,
				     !!(table->data_vec[ix].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;
				deleted = true;


	write_unlock_irq(&table->rwlock);
	write_unlock_irq(&table->rwlock);
+30 −32
Original line number Original line Diff line number Diff line
@@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
		complete(&id_priv->comp);
		complete(&id_priv->comp);
}
}


static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

struct rdma_cm_id *rdma_create_id(struct net *net,
struct rdma_cm_id *rdma_create_id(struct net *net,
				  rdma_cm_event_handler event_handler,
				  rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  void *context, enum rdma_port_space ps,
@@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
	struct rdma_cm_event event;
	struct rdma_cm_event event;
	int ret = 0;
	int ret = 0;


	mutex_lock(&id_priv->handler_mutex);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	     id_priv->state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
	     id_priv->state != RDMA_CM_DISCONNECT))
		return 0;
		goto out;


	memset(&event, 0, sizeof event);
	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	switch (ib_event->event) {
@@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e


static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event;
	struct rdma_cm_event event;
	struct net_device *net_dev;
	struct net_device *net_dev;
	int offset, ret;
	int offset, ret;
@@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
		goto net_dev_put;
		goto net_dev_put;
	}
	}


	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		ret = -ECONNABORTED;
		goto net_dev_put;
		goto err1;
	}
	}


	memset(&event, 0, sizeof event);
	memset(&event, 0, sizeof event);
@@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;


	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
	mutex_lock(&id_priv->handler_mutex);
		return 0;
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;


	memset(&event, 0, sizeof event);
	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	switch (iw_event->event) {
@@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
		return ret;
		return ret;
	}
	}


out:
	mutex_unlock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
	return ret;
}
}
@@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
	struct rdma_cm_id *new_cm_id;
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	struct rdma_cm_event event;
	int ret;
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;


	listen_id = cm_id->context;
	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))

		return -ECONNABORTED;
	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN)
		goto out;


	/* Create a new RDMA id for the new IW CM ID */
	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
	new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;
	int ret = 0;


	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
	mutex_lock(&id_priv->handler_mutex);
		return 0;
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;


	memset(&event, 0, sizeof event);
	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	switch (ib_event->event) {
@@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
	struct rdma_id_private *id_priv;
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	struct rdma_cm_event event;
	int ret;
	int ret = 0;


	id_priv = mc->id_priv;
	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
	mutex_lock(&id_priv->handler_mutex);
	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
		return 0;
	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
		goto out;


	if (!status)
	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
@@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
		return 0;
		return 0;
	}
	}


out:
	mutex_unlock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
	return 0;
}
}
@@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
		   rdma_start_port(id_priv->cma_dev->device)];
	if (addr->sa_family == AF_INET) {
	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
			err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
					    true);
					    true);
		if (!err) {
			if (!err)
				mc->igmp_joined = true;
				mc->igmp_joined = true;
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
		}
		}
	} else {
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+1 −1
Original line number Original line Diff line number Diff line
@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
	struct ib_srq			*srq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	struct ib_qp			*qp;
	char				*buf;
	char				*buf;
	struct ib_qp_init_attr		attr;
	struct ib_qp_init_attr		attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;
	int				ret;


+10 −6
Original line number Original line Diff line number Diff line
@@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		ah_attr->grh.dgid = sgid;
		ah_attr->grh.dgid = sgid;


		if (!rdma_cap_eth_ah(device, port_num)) {
		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 port_num, NULL,
								 &gid_index);
								 &gid_index);
				if (ret)
				if (ret)
					return ret;
					return ret;
			} else {
				gid_index = 0;
			}
		}
		}


		ah_attr->grh.sgid_index = (u8) gid_index;
		ah_attr->grh.sgid_index = (u8) gid_index;
+20 −8
Original line number Original line Diff line number Diff line
@@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
			   unsigned int *np);
static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);


/*
/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * Error interrupt table entry.  This is used as input to the interrupt
@@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work)
	}
	}


	reset_neighbor_info(ppd);
	reset_neighbor_info(ppd);
	if (ppd->mgmt_allowed)
		remove_full_mgmt_pkey(ppd);


	/* disable the port */
	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
	ppd->pkeys[2] = FULL_MGMT_P_KEY;
	ppd->pkeys[2] = FULL_MGMT_P_KEY;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
	hfi1_event_pkey_change(ppd->dd, ppd->port);
}
}


static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
{
	if (ppd->pkeys[2] != 0) {
		ppd->pkeys[2] = 0;
		ppd->pkeys[2] = 0;
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(ppd->dd, ppd->port);
	}
}
}


/*
/*
@@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd)
		return 0;
		return 0;
	}
	}


	/*
	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
	 * pkey table can be configured properly if the HFI unit is connected
	 * to switch port with MgmtAllowed=NO
	 */
	clear_full_mgmt_pkey(ppd);

	return set_link_state(ppd, HLS_DN_POLL);
	return set_link_state(ppd, HLS_DN_POLL);
}
}


@@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
	int i;
	int i, j;
	u32 thres;
	u32 thres;


	for (i = 0; i < ppd->vls_supported; i++) {
	for (i = 0; i < ppd->vls_supported; i++) {
@@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd)
			    sc_mtu_to_threshold(dd->vld[i].sc,
			    sc_mtu_to_threshold(dd->vld[i].sc,
						dd->vld[i].mtu,
						dd->vld[i].mtu,
						dd->rcd[0]->rcvhdrqentsize));
						dd->rcd[0]->rcvhdrqentsize));
		sc_set_cr_threshold(dd->vld[i].sc, thres);
		for (j = 0; j < INIT_SC_PER_VL; j++)
			sc_set_cr_threshold(
					pio_select_send_context_vl(dd, j, i),
					    thres);
	}
	}
	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
		    sc_mtu_to_threshold(dd->vld[15].sc,
		    sc_mtu_to_threshold(dd->vld[15].sc,
Loading