Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3341713c authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tags 'for-linus' and 'for-next' of...

Merge tags 'for-linus' and 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull more rdma updates from Doug Ledford:
 "As mentioned in my first pull request, this is the subsequent pull
  requests I had. This is all I have, and in fact this cleans out the
  RDMA subsystem's entire patchworks queue of kernel changes that are
  ready to go (well, it did for the weekend anyway, a few new patches
  are in, but they'll be coming during the -rc cycle).

  The first tag contains a single patch that would have conflicted if
  taken from my tree or DaveM's tree as it needed our trees merged to
  come cleanly.

  The second tag contains the patch series from Intel plus three other
  stragglers that came in late last week. I took them because it allowed
  me to legitimately claim that the RDMA patchworks queue was, for a
  short time, 100% cleared of all waiting kernel patches, woohoo! :-).

  I have it under my for-next tag, so it did get 0day and linux-next
  over the end of last week, and linux-next did show one minor conflict.

  Summary:

  'for-linus' tag:
   - mlx5/IPoIB fixup patch

  'for-next' tag:
   - the hfi1 15 patch set that landed late
   - IPoIB get_link_ksettings which landed late because I asked for a
     respin
   - one late rxe change
   - one -rc worthy fix that's in early"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/mlx5: Enable IPoIB acceleration

* tag 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  rxe: expose num_possible_cpus() cnum_comp_vectors
  IB/rxe: Update caller's CRC for RXE_MEM_TYPE_DMA memory type
  IB/hfi1: Clean up on context initialization failure
  IB/hfi1: Fix an assign/ordering issue with shared context IDs
  IB/hfi1: Clean up context initialization
  IB/hfi1: Correctly clear the pkey
  IB/hfi1: Search shared contexts on the opened device, not all devices
  IB/hfi1: Remove atomic operations for SDMA_REQ_HAVE_AHG bit
  IB/hfi1: Use filedata rather than filepointer
  IB/hfi1: Name function prototype parameters
  IB/hfi1: Fix a subcontext memory leak
  IB/hfi1: Return an error on memory allocation failure
  IB/hfi1: Adjust default eager_buffer_size to 8MB
  IB/hfi1: Get rid of divide when setting the tx request header
  IB/hfi1: Fix yield logic in send engine
  IB/hfi1, IB/rdmavt: Move r_adefered to r_lock cache line
  IB/hfi1: Fix checks for Offline transient state
  IB/ipoib: add get_link_ksettings in ethtool
Loading
Loading
Loading
Loading
+20 −27
Original line number Diff line number Diff line
@@ -1055,7 +1055,7 @@ static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
@@ -1068,9 +1068,9 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
@@ -10233,7 +10233,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
	if (pstate == PLS_OFFLINE) {
		do_transition = 0;	/* in right state */
		do_wait = 0;		/* ...no need to wait */
	} else if ((pstate & 0xff) == PLS_OFFLINE) {
	} else if ((pstate & 0xf0) == PLS_OFFLINE) {
		do_transition = 0;	/* in an offline transient state */
		do_wait = 1;		/* ...wait for it to settle */
	} else {
@@ -12662,7 +12662,7 @@ u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
#define SET_STATIC_RATE_CONTROL_SMASK(r) \
(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

int hfi1_init_ctxt(struct send_context *sc)
void hfi1_init_ctxt(struct send_context *sc)
{
	if (sc) {
		struct hfi1_devdata *dd = sc->dd;
@@ -12679,7 +12679,6 @@ int hfi1_init_ctxt(struct send_context *sc)
		write_kctxt_csr(dd, sc->hw_context,
				SEND_CTXT_CHECK_ENABLE, reg);
	}
	return 0;
}

int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
@@ -14528,30 +14527,24 @@ int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
	return ret;
}

int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
	struct hfi1_ctxtdata *rcd;
	unsigned sctxt;
	int ret = 0;
	u8 hw_ctxt;
	u64 reg;

	if (ctxt < dd->num_rcv_contexts) {
		rcd = dd->rcd[ctxt];
	} else {
		ret = -EINVAL;
		goto done;
	}
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
	if (!ctxt || !ctxt->sc)
		return -EINVAL;

	if (ctxt->ctxt >= dd->num_rcv_contexts)
		return -EINVAL;

	hw_ctxt = ctxt->sc->hw_context;
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
done:
	return ret;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);

	return 0;
}

/*
+6 −4
Original line number Diff line number Diff line
@@ -636,7 +636,8 @@ static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
	write_csr(dd, offset0 + (0x1000 * ctxt), value);
}

u64 create_pbc(struct hfi1_pportdata *ppd, u64, int, u32, u32);
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len);

/* firmware.c */
#define SBUS_MASTER_BROADCAST 0xfd
@@ -728,7 +729,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd);
void set_intr_state(struct hfi1_devdata *dd, u32 enable);
void apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
				 int refresh_widths);
void update_usrhead(struct hfi1_ctxtdata *, u32, u32, u32, u32, u32);
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
		    u32 intr_adjust, u32 npkts);
int stop_drain_data_vls(struct hfi1_devdata *dd);
int open_fill_data_vls(struct hfi1_devdata *dd);
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns);
@@ -1347,7 +1349,7 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
struct ib_header *hfi1_get_msgheader(
				struct hfi1_devdata *dd, __le32 *rhf_addr);
int hfi1_init_ctxt(struct send_context *sc);
void hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order);
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
@@ -1360,7 +1362,7 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey);
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt);
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey);
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt);
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
+3 −39
Original line number Diff line number Diff line
@@ -85,8 +85,8 @@ module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
@@ -210,42 +210,6 @@ int hfi1_count_active_units(void)
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int pidx;
	struct hfi1_pportdata *ppd;

	/* Walk the global device list under hfi1_devs_lock (IRQ-safe). */
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	list_for_each_entry(dd, &hfi1_dev_list, list) {
		nunits++;
		/* A unit is "present" only if flagged and its registers are mapped. */
		if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/* A port counts as up when it has a LID and the link is up. */
			if (ppd->lid && ppd->linkup)
				nup++;
		}
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	/* Output arguments are optional; only fill the ones the caller gave us. */
	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}

/*
 * Get address of eager buffer from it's index (allocated in chunks, not
 * contiguous).
@@ -1325,7 +1289,7 @@ int hfi1_reset_device(int unit)
	if (dd->rcd)
		for (i = dd->first_dyn_alloc_ctxt;
		     i < dd->num_rcv_contexts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
			if (!dd->rcd[i])
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
+216 −209
Original line number Diff line number Diff line
@@ -49,6 +49,7 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include <rdma/ib.h>

@@ -70,30 +71,37 @@
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *, int);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
			 struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_fault *);
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
			 __u32 len);
static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
			 __u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);

@@ -173,6 +181,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
					       struct hfi1_devdata,
					       user_cdev);

	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase))
		return -EINVAL;

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

@@ -187,6 +198,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
		fd->rec_cpu_num = -1; /* no cpu affinity by default */
		fd->mm = current->mm;
		mmgrab(fd->mm);
		fd->dd = dd;
		fp->private_data = fd;
	} else {
		fp->private_data = NULL;
@@ -229,20 +241,14 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
				   sizeof(uinfo)))
			return -EFAULT;

		ret = assign_ctxt(fp, &uinfo);
		if (ret < 0)
			return ret;
		ret = setup_ctxt(fp);
		if (ret)
			return ret;
		ret = user_init(fp);
		ret = assign_ctxt(fd, &uinfo);
		break;
	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
		ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
				    sizeof(struct hfi1_ctxt_info));
		break;
	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fp, (void __user *)(unsigned long)arg,
		ret = get_base_info(fd, (void __user *)(unsigned long)arg,
				    sizeof(struct hfi1_base_info));
		break;
	case HFI1_IOCTL_CREDIT_UPD:
@@ -256,7 +262,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
		ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
		if (!ret) {
			/*
			 * Copy the number of tidlist entries we used
@@ -278,7 +284,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
		ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
@@ -293,7 +299,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
		ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
@@ -430,7 +436,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp,	(struct iovec *)(from->iov + done),
			fd, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
@@ -756,6 +762,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * Clear any left over, unhandled events so the next process that
	 * gets this context doesn't get confused.
@@ -764,8 +773,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
	__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
	if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
		mutex_unlock(&hfi1_mutex);
		goto done;
	}
@@ -795,8 +804,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)

	dd->rcd[uctxt->ctxt] = NULL;

	hfi1_user_exp_rcv_free(fdata);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
	hfi1_user_exp_rcv_grp_free(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
@@ -836,89 +845,96 @@ static u64 kvirt_to_phys(void *addr)
	return paddr;
}

static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	int ret;
	unsigned int swmajor, swminor;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}
	if (swmajor != HFI1_USER_SWMAJOR)
		return -ENODEV;

	swminor = uinfo->userversion & 0xffff;

	mutex_lock(&hfi1_mutex);
	/* First, lets check if we need to setup a shared context? */
	/*
	 * Get a sub context if necessary.
	 * ret < 0 error, 0 no context, 1 sub-context found
	 */
	ret = 0;
	if (uinfo->subctxt_cnt) {
		struct hfi1_filedata *fd = fp->private_data;

		ret = find_shared_ctxt(fp, uinfo);
		if (ret < 0)
			goto done_unlock;
		if (ret) {
		ret = find_sub_ctxt(fd, uinfo);
		if (ret > 0)
			fd->rec_cpu_num =
				hfi1_get_proc_affinity(fd->uctxt->numa_id);
	}
	}

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
	 * Allocate a base context if context sharing is not required or we
	 * couldn't find a sub context.
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor);
	}
done_unlock:
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}

static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno)
{
	struct hfi1_devdata *dd = NULL;
	int devmax, npresent, nup;
	if (!ret)
		ret = allocate_ctxt(fd, fd->dd, uinfo);

	devmax = hfi1_count_units(&npresent, &nup);
	if (!npresent)
		return -ENXIO;
	mutex_unlock(&hfi1_mutex);

	if (!nup)
		return -ENETDOWN;
	/* Depending on the context type, do the appropriate init */
	if (ret > 0) {
		/*
		 * sub-context info can only be set up after the base
		 * context has been completed.
		 */
		ret = wait_event_interruptible(fd->uctxt->wait, !test_bit(
					       HFI1_CTXT_BASE_UNINIT,
					       &fd->uctxt->event_flags));
		if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) {
			clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
			return -ENOMEM;
		}
		/* The only thing a sub context needs is the user_xxx stuff */
		if (!ret)
			ret = init_user_ctxt(fd);

	dd = hfi1_lookup(devno);
	if (!dd)
		return -ENODEV;
	else if (!dd->freectxts)
		return -EBUSY;
		if (ret)
			clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
	} else if (!ret) {
		ret = setup_base_ctxt(fd);
		if (fd->uctxt->subctxt_cnt) {
			/* If there is an error, set the failed bit. */
			if (ret)
				set_bit(HFI1_CTXT_BASE_FAILED,
					&fd->uctxt->event_flags);
			/*
			 * Base context is done, notify anybody using a
			 * sub-context that is waiting for this completion
			 */
			clear_bit(HFI1_CTXT_BASE_UNINIT,
				  &fd->uctxt->event_flags);
			wake_up(&fd->uctxt->wait);
		}
	}

	return allocate_ctxt(fp, dd, uinfo);
	return ret;
}

static int find_shared_ctxt(struct file *fp,
/*
 * The hfi1_mutex must be held when this function is called.  It is
 * necessary to ensure serialized access to the bitmask in_use_ctxts.
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
			 const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);
	int i;
	struct hfi1_devdata *dd = fd->dd;
	u16 subctxt;

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_dyn_alloc_ctxt;
		     i < dd->num_rcv_contexts; i++) {
	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		struct hfi1_ctxtdata *uctxt = dd->rcd[i];

		/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
		if (!uctxt ||
		    bitmap_empty(uctxt->in_use_ctxts,
				 HFI1_MAX_SHARED_CTXTS))
			continue;

		/* Skip dynamically allocted kernel contexts */
@@ -934,29 +950,30 @@ static int find_shared_ctxt(struct file *fp,
			continue;

		/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
		if (uctxt->userversion != uinfo->userversion)
			return -EINVAL;

		/* Find an unused context */
		subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
					      HFI1_MAX_SHARED_CTXTS);
		if (subctxt >= uctxt->subctxt_cnt)
			return -EBUSY;

		fd->uctxt = uctxt;
			fd->subctxt  = uctxt->cnt++;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
		fd->subctxt = subctxt;
		__set_bit(fd->subctxt, uctxt->in_use_ctxts);

		return 1;
	}

done:
	return ret;
	return 0;
}

static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	unsigned int ctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
@@ -970,6 +987,14 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
		return -EIO;
	}

	/*
	 * This check is sort of redundant to the next EBUSY error. It would
	 * also indicate an inconsistancy in the driver if this value was
	 * zero, but there were still contexts available.
	 */
	if (!dd->freectxts)
		return -EBUSY;

	for (ctxt = dd->first_dyn_alloc_ctxt;
	     ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
@@ -1013,12 +1038,12 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
		goto ctxdata_free;

	/*
	 * Setup shared context resources if the user-level has requested
	 * shared contexts and this is the 'master' process.
	 * Setup sub context resources if the user-level has requested
	 * sub contexts.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper master.
	 */
	if (uinfo->subctxt_cnt && !fd->subctxt) {
	if (uinfo->subctxt_cnt) {
		ret = init_subctxts(uctxt, uinfo);
		/*
		 * On error, we don't need to disable and de-allocate the
@@ -1055,7 +1080,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo)
{
	unsigned num_subctxts;
	u16 num_subctxts;

	num_subctxts = uinfo->subctxt_cnt;
	if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
@@ -1063,9 +1088,8 @@ static int init_subctxts(struct hfi1_ctxtdata *uctxt,

	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	uctxt->active_slaves = 1;
	uctxt->redirect_seq_cnt = 1;
	set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
	set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);

	return 0;
}
@@ -1073,13 +1097,12 @@ static int init_subctxts(struct hfi1_ctxtdata *uctxt,
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	unsigned num_subctxts = uctxt->subctxt_cnt;
	u16 num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	if (!uctxt->subctxt_uregbase)
		return -ENOMEM;

	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
@@ -1094,25 +1117,22 @@ static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
		ret = -ENOMEM;
		goto bail_rhdr;
	}
	goto bail;

	return 0;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
	uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;
bail:

	return ret;
}

static int user_init(struct file *fp)
static void user_init(struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/* make sure that the context has already been setup */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return -EFAULT;

	/* initialize poll variables... */
	uctxt->urgent = 0;
@@ -1160,20 +1180,12 @@ static int user_init(struct file *fp)
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

	/* Notify any waiting slaves */
	if (uctxt->subctxt_cnt) {
		clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
}

	return 0;
}

static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
			 __u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;

@@ -1211,75 +1223,71 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
	return ret;
}

static int setup_ctxt(struct file *fp)
static int init_user_ctxt(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret;

	ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
	if (ret)
		return ret;

	ret = hfi1_user_exp_rcv_init(fd);

	return ret;
}

static int setup_base_ctxt(struct hfi1_filedata *fd)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	/*
	 * Context should be set up only once, including allocation and
	 * programming of eager buffers. This is done if context sharing
	 * is not requested or by the master process.
	 */
	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		ret = hfi1_init_ctxt(uctxt->sc);
		if (ret)
			goto done;
	hfi1_init_ctxt(uctxt->sc);

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
			goto done;
		return ret;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
			goto done;
		if (uctxt->subctxt_cnt && !fd->subctxt) {
		goto setup_failed;

	/* If sub-contexts are enabled, do the appropriate setup */
	if (uctxt->subctxt_cnt)
		ret = setup_subctxt(uctxt);
	if (ret)
				goto done;
		}
	} else {
		ret = wait_event_interruptible(uctxt->wait, !test_bit(
					       HFI1_CTXT_MASTER_UNINIT,
					       &uctxt->event_flags));
		if (ret)
			goto done;
	}
		goto setup_failed;

	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
	ret = hfi1_user_exp_rcv_grp_init(fd);
	if (ret)
		goto done;
	/*
	 * Expected receive has to be setup for all processes (including
	 * shared contexts). However, it has to be done after the master
	 * context has been fully configured as it depends on the
	 * eager/expected split of the RcvArray entries.
	 * Setting it up here ensures that the subcontexts will be waiting
	 * (due to the above wait_event_interruptible() until the master
	 * is setup.
	 */
	ret = hfi1_user_exp_rcv_init(fp);
		goto setup_failed;

	ret = init_user_ctxt(fd);
	if (ret)
		goto done;
		goto setup_failed;

	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
	user_init(uctxt);

	return 0;

setup_failed:
	hfi1_free_ctxtdata(dd, uctxt);
	return ret;
}

static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
			 __u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	ssize_t sz;
	unsigned offset;
	int ret = 0;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt);
	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
@@ -1443,7 +1451,7 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
@@ -1478,7 +1486,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
			  unsigned long events)
{
	int i;
@@ -1499,8 +1507,7 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
	return 0;
}

static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
			 u16 pkey)
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
+50 −55

File changed.

Preview size limit exceeded, changes collapsed.

Loading