Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d295dbeb authored by Michael J. Ruhl, committed by Doug Ledford
Browse files

IB/hfi1: User context locking is inconsistent



There is a mixture of mutex and spinlocks to protect receive context
(rcd/uctxt) information.  This is not used consistently.

Use the mutex to protect device receive context information only.
Use the spinlock to protect sub context information only.

Protect access to items in the rcd array with a spinlock and
reference count.

Remove spinlock around dd->rcd array cleanup.  Since interrupts are
disabled and cleaned up before this point, this lock is not useful.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f2a3bc00
Loading
Loading
Loading
Loading
+23 −12
Original line number Diff line number Diff line
@@ -240,11 +240,14 @@ static inline void aspm_disable_all(struct hfi1_devdata *dd)
	u16 i;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = dd->rcd[i];
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd) {
			del_timer_sync(&rcd->aspm_timer);
			spin_lock_irqsave(&rcd->aspm_lock, flags);
			rcd->aspm_intr_enable = false;
			spin_unlock_irqrestore(&rcd->aspm_lock, flags);
			hfi1_rcd_put(rcd);
		}
	}

	aspm_disable(dd);
@@ -264,11 +267,14 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
		return;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = dd->rcd[i];
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd) {
			spin_lock_irqsave(&rcd->aspm_lock, flags);
			rcd->aspm_intr_enable = true;
			rcd->aspm_enabled = true;
			spin_unlock_irqrestore(&rcd->aspm_lock, flags);
			hfi1_rcd_put(rcd);
		}
	}
}

@@ -284,13 +290,18 @@ static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)

static inline void aspm_init(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	spin_lock_init(&dd->aspm_lock);
	dd->aspm_supported = aspm_hw_l1_supported(dd);

	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++)
		aspm_ctx_init(dd->rcd[i]);
	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			aspm_ctx_init(rcd);
		hfi1_rcd_put(rcd);
	}

	/* Start with ASPM disabled */
	aspm_hw_set_l1_ent_latency(dd);
+21 −9
Original line number Diff line number Diff line
@@ -6785,13 +6785,17 @@ static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
static void rxe_freeze(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;

	/* disable port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* disable all receive contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, dd->rcd[i]);
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
@@ -6804,20 +6808,23 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u16 i;
	struct hfi1_ctxtdata *rcd;

	/* enable all kernel contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[i];
		rcd = hfi1_rcd_get_by_index(dd, i);

		/* Ensure all non-user contexts(including vnic) are enabled */
		if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
		if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
			hfi1_rcd_put(rcd);
			continue;

		}
		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		hfi1_rcd_put(rcd);
	}

	/* enable port */
@@ -8104,7 +8111,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		rcd = hfi1_rcd_get_by_index(dd, source);
		if (rcd) {
			/* Check for non-user contexts, including vnic */
			if ((source < dd->first_dyn_alloc_ctxt) ||
@@ -8112,6 +8119,8 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
				rcd->do_interrupt(rcd, 0);
			else
				handle_user_interrupt(rcd);

			hfi1_rcd_put(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
@@ -8133,12 +8142,14 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = dd->rcd[source];
		rcd = hfi1_rcd_get_by_index(dd, source);
		if (rcd) {
			/* only pay attention to user urgent interrupts */
			if ((source >= dd->first_dyn_alloc_ctxt) &&
			    (!rcd->sc || (rcd->sc->type == SC_USER)))
				handle_user_interrupt(rcd);

			hfi1_rcd_put(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
@@ -8343,7 +8354,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data)
	int disposition;
	int present;

	trace_hfi1_receive_interrupt(dd, rcd->ctxt);
	trace_hfi1_receive_interrupt(dd, rcd);
	this_cpu_inc(*dd->int_counter);
	aspm_ctx_disable(rcd);

@@ -13030,7 +13041,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
			me->type = IRQ_SDMA;
		} else if (first_rx <= i && i < last_rx) {
			idx = i - first_rx;
			rcd = dd->rcd[idx];
			rcd = hfi1_rcd_get_by_index(dd, idx);
			if (rcd) {
				/*
				 * Set the interrupt register and mask for this
@@ -13049,6 +13060,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
				remap_intr(dd, IS_RCVAVAIL_START + idx, i);
				me->type = IRQ_RCVCTXT;
				rcd->msix_intr = i;
				hfi1_rcd_put(rcd);
			}
		} else {
			/* not in our expected range - complain, then
+21 −11
Original line number Diff line number Diff line
@@ -173,12 +173,15 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
	u64 n_packets = 0, n_bytes = 0;
	struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
	struct hfi1_devdata *dd = dd_from_dev(ibd);
	struct hfi1_ctxtdata *rcd;

	for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
		if (!dd->rcd[j])
			continue;
		n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
		n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
		rcd = hfi1_rcd_get_by_index(dd, j);
		if (rcd) {
			n_packets += rcd->opstats->stats[i].n_packets;
			n_bytes += rcd->opstats->stats[i].n_bytes;
		}
		hfi1_rcd_put(rcd);
	}
	if (!n_packets && !n_bytes)
		return SEQ_SKIP;
@@ -231,6 +234,7 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
	u64 n_packets = 0;
	struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
	struct hfi1_devdata *dd = dd_from_dev(ibd);
	struct hfi1_ctxtdata *rcd;

	if (v == SEQ_START_TOKEN) {
		seq_puts(s, "Ctx:npkts\n");
@@ -240,11 +244,14 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
	spos = v;
	i = *spos;

	if (!dd->rcd[i])
	rcd = hfi1_rcd_get_by_index(dd, i);
	if (!rcd)
		return SEQ_SKIP;

	for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
		n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
	for (j = 0; j < ARRAY_SIZE(rcd->opstats->stats); j++)
		n_packets += rcd->opstats->stats[j].n_packets;

	hfi1_rcd_put(rcd);

	if (!n_packets)
		return SEQ_SKIP;
@@ -1098,12 +1105,15 @@ static int _fault_stats_seq_show(struct seq_file *s, void *v)
	u64 n_packets = 0, n_bytes = 0;
	struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
	struct hfi1_devdata *dd = dd_from_dev(ibd);
	struct hfi1_ctxtdata *rcd;

	for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
		if (!dd->rcd[j])
			continue;
		n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
		n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
		rcd = hfi1_rcd_get_by_index(dd, j);
		if (rcd) {
			n_packets += rcd->opstats->stats[i].n_packets;
			n_bytes += rcd->opstats->stats[i].n_bytes;
		}
		hfi1_rcd_put(rcd);
	}
	if (!n_packets && !n_bytes)
		return SEQ_SKIP;
+47 −24
Original line number Diff line number Diff line
@@ -839,6 +839,7 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)

static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/*
@@ -847,18 +848,27 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (ctxt >= dd->first_dyn_alloc_ctxt) {
		dd->rcd[ctxt]->do_interrupt =
		rcd = hfi1_rcd_get_by_index(dd, ctxt);
		if (rcd) {
			rcd->do_interrupt =
				&handle_receive_interrupt_nodma_rtail;
			hfi1_rcd_put(rcd);
		}
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
		dd->rcd[i]->do_interrupt =
	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			rcd->do_interrupt =
				&handle_receive_interrupt_nodma_rtail;
		hfi1_rcd_put(rcd);
	}
}

static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/*
@@ -867,28 +877,40 @@ static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (ctxt >= dd->first_dyn_alloc_ctxt) {
		dd->rcd[ctxt]->do_interrupt =
		rcd = hfi1_rcd_get_by_index(dd, ctxt);
		if (rcd) {
			rcd->do_interrupt =
				&handle_receive_interrupt_dma_rtail;
			hfi1_rcd_put(rcd);
		}
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
		dd->rcd[i]->do_interrupt =
	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			rcd->do_interrupt =
				&handle_receive_interrupt_dma_rtail;
		hfi1_rcd_put(rcd);
	}
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[i];

		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		if ((i < dd->first_dyn_alloc_ctxt) ||
		    (rcd && rcd->sc && (rcd->sc->type == SC_KERNEL)))
		    (rcd->sc && (rcd->sc->type == SC_KERNEL))) {
			rcd->do_interrupt = &handle_receive_interrupt;
		}
		hfi1_rcd_put(rcd);
	}
}

static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
@@ -1068,6 +1090,7 @@ void receive_interrupt_work(struct work_struct *work)
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
@@ -1078,8 +1101,12 @@ void receive_interrupt_work(struct work_struct *work)
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++)
		force_recv_intr(dd->rcd[i]);
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
@@ -1270,10 +1297,8 @@ void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
int hfi1_reset_device(int unit)
{
	int ret;
	u16 i;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
@@ -1291,17 +1316,15 @@ int hfi1_reset_device(int unit)
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/* If there are any user/vnic contexts, we cannot reset */
	mutex_lock(&hfi1_mutex);
	if (dd->rcd)
		for (i = dd->first_dyn_alloc_ctxt;
		     i < dd->num_rcv_contexts; i++) {
			if (!dd->rcd[i])
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (hfi1_stats.sps_ctxts) {
			mutex_unlock(&hfi1_mutex);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	mutex_unlock(&hfi1_mutex);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
+119 −66
Original line number Diff line number Diff line
@@ -757,7 +757,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
	hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);

	flush_wc();
	/* drain user sdma queue */
@@ -769,6 +769,13 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * fdata->uctxt is used in the above cleanup.  It is not ready to be
	 * removed until here.
	 */
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
@@ -777,16 +784,14 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	mutex_lock(&hfi1_mutex);
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	fdata->uctxt = NULL;
	hfi1_rcd_put(uctxt); /* fdata reference */
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		mutex_unlock(&hfi1_mutex);
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
@@ -808,13 +813,11 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
		set_pio_integrity(uctxt->sc);
		sc_disable(uctxt->sc);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	hfi1_free_ctxt_rcv_groups(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->event_flags = 0;
	mutex_unlock(&hfi1_mutex);

	deallocate_ctxt(uctxt);
done:
@@ -844,9 +847,22 @@ static u64 kvirt_to_phys(void *addr)
	return paddr;
}

/**
 * complete_subctxt
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed.  This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the subcontext
 * initialization.
 *
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
	int ret;
	unsigned long flags;

	/*
	 * sub-context info can only be set up after the base context
@@ -859,7 +875,7 @@ static int complete_subctxt(struct hfi1_filedata *fd)
	if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
		ret = -ENOMEM;

	/* The only thing a sub context needs is the user_xxx stuff */
	/* Finish the sub-context init */
	if (!ret) {
		fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
		ret = init_user_ctxt(fd, fd->uctxt);
@@ -868,9 +884,9 @@ static int complete_subctxt(struct hfi1_filedata *fd)
	if (ret) {
		hfi1_rcd_put(fd->uctxt);
		fd->uctxt = NULL;
		mutex_lock(&hfi1_mutex);
		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
		mutex_unlock(&hfi1_mutex);
		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
	}

	return ret;
@@ -911,14 +927,15 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)

	mutex_unlock(&hfi1_mutex);

	/* Depending on the context type, do the appropriate init */
	/* Depending on the context type, finish the appropriate init */
	switch (ret) {
	case 0:
		ret = setup_base_ctxt(fd, uctxt);
		if (uctxt->subctxt_cnt) {
			/*
			 * Base context is done, notify anybody using a
			 * sub-context that is waiting for this completion
			 * Base context is done (successfully or not), notify
			 * anybody using a sub-context that is waiting for
			 * this completion.
			 */
			clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
			wake_up(&uctxt->wait);
@@ -934,66 +951,105 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
	return ret;
}

/*
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
/**
 * match_ctxt
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
static int match_ctxt(struct hfi1_filedata *fd,
		      const struct hfi1_user_info *uinfo,
		      struct hfi1_ctxtdata *uctxt)
{
	u16 i;
	struct hfi1_devdata *dd = fd->dd;
	unsigned long flags;
	u16 subctxt;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		struct hfi1_ctxtdata *uctxt = dd->rcd[i];

		/* Skip ctxts which are not yet open */
		if (!uctxt ||
		    bitmap_empty(uctxt->in_use_ctxts,
				 HFI1_MAX_SHARED_CTXTS))
			continue;

		/* Skip dynamically allocted kernel contexts */
	/* Skip dynamically allocated kernel contexts */
	if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
			continue;
		return 0;

	/* Skip ctxt if it doesn't match the requested one */
		if (memcmp(uctxt->uuid, uinfo->uuid,
			   sizeof(uctxt->uuid)) ||
	if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
	    uctxt->jkey != generate_jkey(current_uid()) ||
	    uctxt->subctxt_id != uinfo->subctxt_id ||
	    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
			continue;
		return 0;

		/* Verify the sharing process matches the master */
	/* Verify the sharing process matches the base */
	if (uctxt->userversion != uinfo->userversion)
		return -EINVAL;

		/* Find an unused context */
	/* Find an unused sub context */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		/* context is being closed, do not use */
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return 0;
	}

	subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
				      HFI1_MAX_SHARED_CTXTS);
		if (subctxt >= uctxt->subctxt_cnt)
	if (subctxt >= uctxt->subctxt_cnt) {
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		return -EBUSY;
	}

		fd->uctxt = uctxt;
	fd->subctxt = subctxt;
	__set_bit(fd->subctxt, uctxt->in_use_ctxts);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	fd->uctxt = uctxt;
	hfi1_rcd_get(uctxt);
		__set_bit(fd->subctxt, uctxt->in_use_ctxts);

	return 1;
}

/**
 * find_sub_ctxt
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Subcontext found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = fd->dd;
	u16 i;
	int ret;

	if (!uinfo->subctxt_cnt)
		return 0;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		uctxt = hfi1_rcd_get_by_index(dd, i);
		if (uctxt) {
			ret = match_ctxt(fd, uinfo, uctxt);
			hfi1_rcd_put(uctxt);
			/* value of != 0 will return */
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo,
			 struct hfi1_ctxtdata **cd)
			 struct hfi1_ctxtdata **rcd)
{
	struct hfi1_ctxtdata *uctxt;
	int ret, numa;
@@ -1066,12 +1122,12 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);

	*cd = uctxt;
	*rcd = uctxt;

	return 0;

ctxdata_free:
	hfi1_free_ctxt(dd, uctxt);
	hfi1_free_ctxt(uctxt);
	return ret;
}

@@ -1083,7 +1139,7 @@ static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
		aspm_enable_all(uctxt->dd);
	mutex_unlock(&hfi1_mutex);

	hfi1_free_ctxt(uctxt->dd, uctxt);
	hfi1_free_ctxt(uctxt);
}

static void init_subctxts(struct hfi1_ctxtdata *uctxt,
@@ -1279,8 +1335,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
	return 0;

setup_failed:
	/* Set the failed bit so sub-context init can do the right thing */
	set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
	deallocate_ctxt(uctxt);

	return ret;
}

@@ -1417,18 +1475,13 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	u16 ctxt;
	int ret = 0;
	unsigned long flags;

	if (!dd->events) {
		ret = -EINVAL;
		goto done;
	}
	if (!dd->events)
		return -EINVAL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = dd->rcd[ctxt];
		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
		if (uctxt) {
			unsigned long *evs = dd->events +
				(uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
@@ -1441,11 +1494,11 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
			hfi1_rcd_put(uctxt);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
	return ret;

	return 0;
}

/**
Loading