Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 147d9e7b authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull rdma fixes from Doug Ledford:
 "This is the first -rc pull for the RDMA subsystem.  The patch count is
  high, but they are all smallish patches fixing simple things for the
  most part, and the overall line count of changes here is smaller than
  the patch count would lead a person to believe.

  Code is up and running in my labs, including direct testing of cxgb4,
  mlx4, mlx5, ocrdma, and qib.

  Summary:

   - Multiple minor fixes to the rdma core
   - Multiple minor fixes to hfi1
   - Multiple minor fixes to mlx5
   - A very few other minor fixes (SRP, IPoIB, usNIC, mlx4)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (35 commits)
  IB/IPoIB: Don't update neigh validity for unresolved entries
  IB/mlx5: Fix alternate path code
  IB/mlx5: Fix pkey_index length in the QP path record
  IB/mlx5: Fix entries check in mlx5_ib_resize_cq
  IB/mlx5: Fix entries checks in mlx5_ib_create_cq
  IB/mlx5: Check BlueFlame HCA support
  IB/mlx5: Fix returned values of query QP
  IB/mlx5: Limit query HCA clock
  IB/mlx5: Fix FW version display in sysfs
  IB/mlx5: Return PORT_ERR in Active to Initializing transition
  IB/mlx5: Set flow steering capability bit
  IB/core: Make all casts in ib_device_cap_flags enum consistent
  IB/core: Fix bit corruption in ib_device_cap_flags structure
  IB/core: Initialize sysfs attributes before sysfs create group
  IB/IPoIB: Disable bottom half when dealing with device address
  IB/core: Fix removal of default GID cache entry
  IB/IPoIB: Fix race between ipoib_remove_one to sysfs functions
  IB/core: Fix query port failure in RoCE
  IB/core: fix error unwind in sysfs hw counters code
  IB/core: Fix array length allocation
  ...
parents 52e7d46c 61c78eea
Loading
Loading
Loading
Loading
+8 −2
Original line number Diff line number Diff line
@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* in rdma_cap_roce_gid_table, this funciton should be protected by a
	 * sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;

	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);
+2 −2
Original line number Diff line number Diff line
@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irq(&cm.lock);
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
+5 −1
Original line number Diff line number Diff line
@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;
@@ -1024,7 +1027,8 @@ static int __init ib_core_init(void)
		goto err_mad;
	}

	if (ib_add_ibnl_clients()) {
	ret = ib_add_ibnl_clients();
	if (ret) {
		pr_warn("Couldn't register ibnl clients\n");
		goto err_sa;
	}
+16 −8
Original line number Diff line number Diff line
@@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
			   u8 port_num)
{
	struct attribute_group *hsag = NULL;
	struct attribute_group *hsag;
	struct rdma_hw_stats *stats;
	int i = 0, ret;
	int i, ret;

	stats = device->alloc_hw_stats(device, port_num);

@@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
		return;

	if (!stats->names || stats->num_counters <= 0)
		goto err;
		goto err_free_stats;

	/*
	 * Two extra attribue elements here, one for the lifespan entry and
	 * one to NULL terminate the list for the sysfs core code
	 */
	hsag = kzalloc(sizeof(*hsag) +
		       // 1 extra for the lifespan config entry
		       sizeof(void *) * (stats->num_counters + 1),
		       sizeof(void *) * (stats->num_counters + 2),
		       GFP_KERNEL);
	if (!hsag)
		return;
		goto err_free_stats;

	ret = device->get_hw_stats(device, stats, port_num,
				   stats->num_counters);
	if (ret != stats->num_counters)
		goto err;
		goto err_free_hsag;

	stats->timestamp = jiffies;

@@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
		hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
		if (!hsag->attrs[i])
			goto err;
		sysfs_attr_init(hsag->attrs[i]);
	}

	/* treat an error here as non-fatal */
	hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
	if (hsag->attrs[i])
		sysfs_attr_init(hsag->attrs[i]);

	if (port) {
		struct kobject *kobj = &port->kobj;
@@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
	return;

err:
	kfree(stats);
	for (; i >= 0; i--)
		kfree(hsag->attrs[i]);
err_free_hsag:
	kfree(hsag);
err_free_stats:
	kfree(stats);
	return;
}

+14 −17
Original line number Diff line number Diff line
@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
	const struct cpumask *node_mask,
		*proc_mask = tsk_cpus_allowed(current);
	struct cpu_mask_set *set = &dd->affinity->proc;
	char buf[1024];

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (cpumask_weight(proc_mask) == 1) {
		scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
			  current->pid, current->comm, buf);
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
			  current->pid, current->comm, buf);
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
	cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
				  &dd->affinity->rcv_intr.mask :
				  &dd->affinity->rcv_intr.used));
	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
	hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs));

	/*
	 * If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
	if (node == -1)
		node = dd->node;
	node_mask = cpumask_of_node(node);
	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
	hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
	hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* diff will hold all unused cpus */
	cpumask_andnot(diff, &set->mask, &set->used);
	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
	hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
	hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));

	/* get cpumask of available CPUs on preferred NUMA */
	cpumask_and(mask, diff, node_mask);
	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
	hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
	hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));

	/*
	 * At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
		cpumask_andnot(diff, &set->mask, &set->used);
		cpumask_andnot(mask, diff, node_mask);
	}
	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
	hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
	hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
		  cpumask_pr_args(mask));

	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids) /* empty */
Loading