Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ce866e2d authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull hfi1 rdma driver updates from Doug Ledford:
 "This is the first pull request of the 4.9 merge window for the RDMA
  subsystem. It is only the hfi1 driver. It had dependencies on code
  that only landed late in the 4.7-rc cycle (around 4.7-rc7), so putting
  this with my other for-next code would have created an ugly merge of
  a lot of 4.7-rc stuff. For that reason, it's being submitted
  individually. It's been through 0day and linux-next"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (37 commits)
  IB/rdmavt: Trivial function comment corrected.
  IB/hfi1: Fix trace of atomic ack
  IB/hfi1: Update SMA ingress checks for response packets
  IB/hfi1: Use EPROM platform configuration read
  IB/hfi1: Add ability to read platform config from the EPROM
  IB/hfi1: Restore EPROM read ability
  IB/hfi1: Document new sysfs entries for hfi1 driver
  IB/hfi1: Add new debugfs sdma_cpu_list file
  IB/hfi1: Add irq affinity notification handler
  IB/hfi1: Add a new VL sysfs attribute for sdma engines
  IB/hfi1: Add sysfs interface for affinity setup
  IB/hfi1: Fix resource release in context allocation
  IB/hfi1: Remove unused variable from devdata
  IB/hfi1: Cleanup tasklet refs in comments
  IB/hfi1: Adjust hardware buffering parameter
  IB/hfi1: Act on external device timeout
  IB/hfi1: Fix defered ack race with qp destroy
  IB/hfi1: Combine shift copy and byte copy for SGE reads
  IB/hfi1: Do not read more than a SGE length
  IB/hfi1: Extend i2c timeout
  ...
parents 19fe4165 61347fa6
Loading
Loading
Loading
Loading
+30 −0
Original line number Diff line number Diff line
@@ -89,6 +89,36 @@ HFI1
   nctxts - number of allowed contexts (PSM2)
   chip_reset - diagnostic (root only)
   boardversion - board version

   sdma<N>/ - one directory per sdma engine (0 - 15)
	sdma<N>/cpu_list - read-write, list of cpus for user-process to sdma
			   engine assignment.
	sdma<N>/vl - read-only, vl the sdma engine maps to.

	The new interface will give the user control on the affinity settings
	for the hfi1 device.
	As an example, to set an sdma engine irq affinity and thread affinity
	of a user process to use the sdma engine, which is "near" in terms
	of NUMA configuration, or physical cpu location, the user will do:

	echo "3" > /proc/irq/<N>/smp_affinity_list
	echo "4-7" > /sys/devices/.../sdma3/cpu_list
	cat /sys/devices/.../sdma3/vl
	0
	echo "8" > /proc/irq/<M>/smp_affinity_list
	echo "9-12" > /sys/devices/.../sdma4/cpu_list
	cat /sys/devices/.../sdma4/vl
	1

	to make sure that when a process runs on cpus 4,5,6, or 7,
	and uses vl=0, then sdma engine 3 is selected by the driver,
	and also the interrupt of the sdma engine 3 is steered to cpu 3.
	Similarly, when a process runs on cpus 9,10,11, or 12 and sets vl=1,
	then engine 4 will be selected and the irq of the sdma engine 4 is
	steered to cpu 8.
	This assumes that in the above N is the irq number of "sdma3",
	and M is irq number of "sdma4" in the /proc/interrupts file.

   ports/1/
          CCMgtA/
               cc_settings_bin - CCA tables used by PSM2
+152 −51
Original line number Diff line number Diff line
@@ -47,6 +47,7 @@
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include "hfi.h"
#include "affinity.h"
@@ -55,7 +56,7 @@

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __SPIN_LOCK_UNLOCKED(&node_affinity.lock),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
@@ -159,14 +160,14 @@ void node_affinity_destroy(void)
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		kfree(entry);
	}
	spin_unlock(&node_affinity.lock);
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

@@ -233,9 +234,8 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	spin_lock(&node_affinity.lock);
	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	/*
	 * If this is the first time this NUMA node's affinity is used,
@@ -246,6 +246,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			mutex_unlock(&node_affinity.lock);
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
@@ -302,15 +303,113 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
					     &entry->general_intr_mask);
		}

		spin_lock(&node_affinity.lock);
		node_affinity_add_tail(entry);
		spin_unlock(&node_affinity.lock);
	}

	mutex_unlock(&node_affinity.lock);
	return 0;
}

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
	struct sdma_engine *sde = msix->arg;
	struct hfi1_devdata *dd = sde->dd;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	int i, old_cpu;

	/*
	 * Ignore no-op updates (same cpu as already assigned) and
	 * out-of-range values.
	 * NOTE(review): num_online_cpus() is a count, not the highest
	 * valid cpu id; with a sparse online mask this bound can admit
	 * an offline cpu or reject a valid one — confirm whether
	 * cpu_online(cpu) was the intended check.
	 */
	if (cpu > num_online_cpus() || cpu == sde->cpu)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	/* Re-point the engine and its irq affinity hint at the new cpu */
	old_cpu = sde->cpu;
	sde->cpu = cpu;
	cpumask_clear(&msix->mask);
	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_dbg(dd, "IRQ vector: %u, type %s engine %u -> cpu: %d\n",
		   msix->msix.vector, irq_type_names[msix->type],
		   sde->this_idx, cpu);
	irq_set_affinity_hint(msix->msix.vector, &msix->mask);

	/*
	 * Set the new cpu in the hfi1_affinity_node and clean
	 * the old cpu if it is not used by any other IRQ
	 */
	set = &entry->def_intr;
	cpumask_set_cpu(cpu, &set->mask);
	cpumask_set_cpu(cpu, &set->used);
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *other_msix;

		other_msix = &dd->msix_entries[i];
		if (other_msix->type != IRQ_SDMA || other_msix == msix)
			continue;

		/* old_cpu still serves another SDMA irq: keep it marked */
		if (cpumask_test_cpu(old_cpu, &other_msix->mask))
			goto unlock;
	}
	/* No other SDMA irq targets old_cpu; release it from the set */
	cpumask_clear_cpu(old_cpu, &set->mask);
	cpumask_clear_cpu(old_cpu, &set->used);
unlock:
	mutex_unlock(&node_affinity.lock);
}

/*
 * IRQ affinity notifier callback, invoked after userspace rewrites
 * /proc/irq/<N>/smp_affinity for an SDMA vector.  Only a single-CPU
 * configuration is supported, so just the first cpu of the new mask
 * is honored.
 */
static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct hfi1_msix_entry *msix;

	msix = container_of(notify, struct hfi1_msix_entry, notify);

	/* Only one CPU configuration supported currently */
	hfi1_update_sdma_affinity(msix, cpumask_first(mask));
}

/*
 * Release callback required by the irq affinity notifier framework.
 * The notify struct is embedded in hfi1_msix_entry (see
 * hfi1_setup_sdma_notifier), so there is no separate allocation to free.
 */
static void hfi1_irq_notifier_release(struct kref *ref)
{
	/*
	 * This is required by affinity notifier. We don't have anything to
	 * free here.
	 */
}

/*
 * Register an affinity notifier on an SDMA msix vector so that user
 * changes via /proc/irq/<N>/smp_affinity are reflected back into the
 * driver's affinity bookkeeping.  Failure is logged but not fatal.
 */
static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	int irq = msix->msix.vector;

	msix->notify.irq = irq;
	msix->notify.notify = hfi1_irq_notifier_notify;
	msix->notify.release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(irq, &msix->notify))
		pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
		       irq);
}

/*
 * Unregister the affinity notifier previously installed by
 * hfi1_setup_sdma_notifier().  Failure is logged but not fatal.
 */
static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	int irq = msix->notify.irq;

	if (irq_set_affinity_notifier(irq, NULL))
		pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
		       irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	int ret;
	cpumask_var_t diff;
@@ -328,9 +427,7 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
	if (!ret)
		return -ENOMEM;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
@@ -360,7 +457,6 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		spin_lock(&node_affinity.lock);
		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
@@ -372,17 +468,6 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);
		spin_unlock(&node_affinity.lock);
	}

	switch (msix->type) {
	case IRQ_SDMA:
		sde->cpu = cpu;
		break;
	case IRQ_GENERAL:
	case IRQ_RCVCTXT:
	case IRQ_OTHER:
		break;
	}

	cpumask_set_cpu(cpu, &msix->mask);
@@ -391,10 +476,25 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
		    extra, cpu);
	irq_set_affinity_hint(msix->msix.vector, &msix->mask);

	if (msix->type == IRQ_SDMA) {
		sde->cpu = cpu;
		hfi1_setup_sdma_notifier(msix);
	}

	free_cpumask_var(diff);
	return 0;
}

/*
 * Public entry point for assigning irq affinity to an msix vector:
 * serialize on node_affinity.lock and delegate the actual CPU
 * selection to get_irq_affinity().
 */
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int rc;

	mutex_lock(&node_affinity.lock);
	rc = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);

	return rc;
}

void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
@@ -402,13 +502,13 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		hfi1_cleanup_sdma_notifier(msix);
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
@@ -420,21 +520,21 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			set = &entry->rcv_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		spin_lock(&node_affinity.lock);
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
		spin_unlock(&node_affinity.lock);
	}

	irq_set_affinity_hint(msix->msix.vector, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}

/* This should be called with node_affinity.lock held */
@@ -535,7 +635,7 @@ int hfi1_get_proc_affinity(int node)
	if (!ret)
		goto free_available_mask;

	spin_lock(&affinity->lock);
	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
@@ -643,7 +743,8 @@ int hfi1_get_proc_affinity(int node)
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);
	spin_unlock(&affinity->lock);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
@@ -664,19 +765,17 @@ void hfi1_put_proc_affinity(int cpu)

	if (cpu < 0)
		return;
	spin_lock(&affinity->lock);

	mutex_lock(&affinity->lock);
	cpumask_clear_cpu(cpu, &set->used);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	spin_unlock(&affinity->lock);
	mutex_unlock(&affinity->lock);
}

/* Prevents concurrent reads and writes of the sdma_affinity attrib */
static DEFINE_MUTEX(sdma_affinity_mutex);

int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
			   size_t count)
{
@@ -684,16 +783,19 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
	cpumask_var_t mask;
	int ret, i;

	spin_lock(&node_affinity.lock);
	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	if (!entry)
		return -EINVAL;
	if (!entry) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = cpulist_parse(buf, mask);
	if (ret)
@@ -705,13 +807,11 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
		goto out;
	}

	mutex_lock(&sdma_affinity_mutex);
	/* reset the SDMA interrupt affinity details */
	init_cpu_mask_set(&entry->def_intr);
	cpumask_copy(&entry->def_intr.mask, mask);
	/*
	 * Reassign the affinity for each SDMA interrupt.
	 */

	/* Reassign the affinity for each SDMA interrupt. */
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *msix;

@@ -719,14 +819,15 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
		if (msix->type != IRQ_SDMA)
			continue;

		ret = hfi1_get_irq_affinity(dd, msix);
		ret = get_irq_affinity(dd, msix);

		if (ret)
			break;
	}
	mutex_unlock(&sdma_affinity_mutex);
out:
	free_cpumask_var(mask);
unlock:
	mutex_unlock(&node_affinity.lock);
	return ret ? ret : strnlen(buf, PAGE_SIZE);
}

@@ -734,15 +835,15 @@ int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
{
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	if (!entry)
	if (!entry) {
		mutex_unlock(&node_affinity.lock);
		return -EINVAL;
	}

	mutex_lock(&sdma_affinity_mutex);
	cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
	mutex_unlock(&sdma_affinity_mutex);
	mutex_unlock(&node_affinity.lock);
	return strnlen(buf, PAGE_SIZE);
}
+1 −2
Original line number Diff line number Diff line
@@ -121,8 +121,7 @@ struct hfi1_affinity_node_list {
	int num_core_siblings;
	int num_online_nodes;
	int num_online_cpus;
	/* protect affinity node list */
	spinlock_t lock;
	struct mutex lock; /* protects affinity nodes */
};

int node_affinity_init(void);
+27 −30
Original line number Diff line number Diff line
@@ -971,7 +971,9 @@ static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT)
	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
@@ -6825,7 +6827,6 @@ void handle_link_up(struct work_struct *work)
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
				     OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		tune_serdes(ppd);
		start_link(ppd);
	}
}
@@ -6998,13 +6999,11 @@ void handle_link_down(struct work_struct *work)
	 * If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up.
	 */
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	} else {
		tune_serdes(ppd);
	else
		start_link(ppd);
}
}

void handle_link_bounce(struct work_struct *work)
{
@@ -7016,7 +7015,6 @@ void handle_link_bounce(struct work_struct *work)
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		tune_serdes(ppd);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
@@ -7531,7 +7529,6 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		tune_serdes(ppd);
		start_link(ppd);
	}
}
@@ -9161,6 +9158,12 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
 */
int start_link(struct hfi1_pportdata *ppd)
{
	/*
	 * Tune the SerDes to a ballpark setting for optimal signal and bit
	 * error rate.  Needs to be done before starting the link.
	 */
	tune_serdes(ppd);

	if (!ppd->link_enabled) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because link is disabled\n",
@@ -9401,8 +9404,6 @@ void qsfp_event(struct work_struct *work)
		 */
		set_qsfp_int_n(ppd, 1);

		tune_serdes(ppd);

		start_link(ppd);
	}

@@ -9544,11 +9545,6 @@ static void try_start_link(struct hfi1_pportdata *ppd)
	}
	ppd->qsfp_retry_count = 0;

	/*
	 * Tune the SerDes to a ballpark setting for optimal signal and bit
	 * error rate.  Needs to be done before starting the link.
	 */
	tune_serdes(ppd);
	start_link(ppd);
}

@@ -9718,12 +9714,12 @@ void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}

struct hfi1_message_header *hfi1_get_msgheader(
struct ib_header *hfi1_get_msgheader(
	struct hfi1_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (struct hfi1_message_header *)
	return (struct ib_header *)
		(rhf_addr - dd->rhf_offset + offset);
}

@@ -11559,10 +11555,10 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
		/* reset the tail and hdr addresses, and sequence count */
		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
				rcd->rcvhdrq_phys);
				rcd->rcvhdrq_dma);
		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					rcd->rcvhdrqtailaddr_phys);
					rcd->rcvhdrqtailaddr_dma);
		rcd->seq_cnt = 1;

		/* reset the cached receive header queue head value */
@@ -11627,9 +11623,9 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
		 * update with a dummy tail address and then disable
		 * receive context.
		 */
		if (dd->rcvhdrtail_dummy_physaddr) {
		if (dd->rcvhdrtail_dummy_dma) {
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					dd->rcvhdrtail_dummy_physaddr);
					dd->rcvhdrtail_dummy_dma);
			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
		}
@@ -11640,7 +11636,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
	if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
		/* See comment on RcvCtxtCtrl.TailUpd above */
@@ -11712,7 +11708,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
		 * so it doesn't contain an address that is invalid.
		 */
		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
				dd->rcvhdrtail_dummy_physaddr);
				dd->rcvhdrtail_dummy_dma);
}

u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
@@ -13389,9 +13385,9 @@ static void init_rbufs(struct hfi1_devdata *dd)
		/*
		 * Give up after 1ms - maximum wait time.
		 *
		 * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
		 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
		 *	148 KB / (66% * 250MB/s) = 920us
		 *	136 KB / (66% * 250MB/s) = 844us
		 */
		if (count++ > 500) {
			dd_dev_err(dd,
@@ -14570,6 +14566,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
	if (ret)
		goto bail_cleanup;

	/* call before get_platform_config(), after init_chip_resources() */
	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

@@ -14690,10 +14691,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
	if (ret)
		goto bail_free_cntrs;

	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	goto bail;

bail_free_rcverr:
+5 −3
Original line number Diff line number Diff line
@@ -82,7 +82,7 @@
 */
#define CM_VAU 3
/* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */
#define CM_GLOBAL_CREDITS 0x940
#define CM_GLOBAL_CREDITS 0x880
/* Number of PKey entries in the HW */
#define MAX_PKEY_VALUES 16

@@ -254,12 +254,14 @@
#define FAILED_LNI_VERIFY_CAP2		BIT(10)
#define FAILED_LNI_CONFIGLT		BIT(11)
#define HOST_HANDSHAKE_TIMEOUT		BIT(12)
#define EXTERNAL_DEVICE_REQ_TIMEOUT	BIT(13)

#define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \
			| FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \
			| FAILED_LNI_VERIFY_CAP1 \
			| FAILED_LNI_VERIFY_CAP2 \
			| FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT)
			| FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT \
			| EXTERNAL_DEVICE_REQ_TIMEOUT)

/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */
#define HOST_REQ_DONE		BIT(0)
@@ -1336,7 +1338,7 @@ enum {
u64 get_all_cpu_total(u64 __percpu *cntr);
void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
struct hfi1_message_header *hfi1_get_msgheader(
struct ib_header *hfi1_get_msgheader(
				struct hfi1_devdata *dd, __le32 *rhf_addr);
int hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
Loading