Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ece8ea7b authored by Jason Gunthorpe; committed by Doug Ledford
Browse files

RDMA/usnic: Do not use ucontext->tgid



Update this driver to match the code it copies from umem.c, which no longer
uses tgid.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent d4b4dd1b
Loading
Loading
Loading
Loading
+45 −46
Original line number Diff line number Diff line
@@ -54,18 +54,6 @@ static struct workqueue_struct *usnic_uiom_wq;
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

/*
 * Deferred work handler for returning a registration's pinned-page
 * charge to its mm when release could not take mmap_sem directly.
 *
 * Subtracts the page count saved at pin time (umem->diff) from the
 * target mm's locked_vm under mmap_sem, drops the mm reference
 * (mmput pairs with the get_task_mm() done in the old release path --
 * TODO confirm against the applied patch), and frees the registration.
 *
 * NOTE(review): this appears to be the old-side (removed) function in
 * this diff; the commit replaces it with usnic_uiom_release_defer().
 */
static void usnic_uiom_reg_account(struct work_struct *work)
{
	/* Recover the registration that embeds this work item. */
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;	/* undo the pinning charge */
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
@@ -99,8 +87,9 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
}

static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
@@ -114,6 +103,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
@@ -136,7 +126,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);
	uiomr->owning_mm = mm = current->mm;
	down_write(&mm->mmap_sem);

	locked = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -196,10 +187,12 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->pinned_vm = locked;
	else {
		mm->pinned_vm = locked;
		mmgrab(uiomr->owning_mm);
	}

	up_write(&current->mm->mmap_sem);
	up_write(&mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}
@@ -379,7 +372,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
@@ -426,29 +419,39 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
			    struct ib_ucontext *ucontext)
/*
 * Common tail of registration teardown: drop the mm_struct reference
 * taken when the pages were pinned (pairs with the mmgrab() in
 * usnic_uiom_get_pages) and free the registration object.
 *
 * NOTE(review): the three unused locals below look like old-side diff
 * residue from the removed usnic_uiom_reg_release() body -- confirm
 * against the applied patch; they should not exist in the real file.
 */
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long diff;
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
/*
 * Number of pages covered by a registration: the region length plus
 * the start offset within the first page, rounded up to whole pages.
 */
static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

	task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;
/*
 * Workqueue handler used when usnic_uiom_reg_release() cannot take
 * mmap_sem itself: returns the pinned-page charge to the owning mm
 * under mmap_sem, then drops the mm reference and frees the
 * registration via __usnic_uiom_release_tail().
 */
static void usnic_uiom_release_defer(struct work_struct *work)
{
	/* Recover the registration that embeds this work item. */
	struct usnic_uiom_reg *uiomr =
		container_of(work, struct usnic_uiom_reg, work);

	down_write(&uiomr->owning_mm->mmap_sem);
	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
	up_write(&uiomr->owning_mm->mmap_sem);

	/*
	 * NOTE(review): the next line appears to be old-side diff residue
	 * ("diff" is not declared in this function) -- confirm it is
	 * absent from the applied patch.
	 */
	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	__usnic_uiom_release_tail(uiomr);
}

/*
 * Tear down a memory registration: unmap and unpin its pages, return
 * the pinned_vm charge to the owning mm, and free the registration.
 *
 * NOTE(review): this span is a rendered diff with +/- markers stripped;
 * old-side lines (the "ucontext"/"mm"/"diff"/"out:" variants) and
 * new-side lines (the "context"/"owning_mm" variants) are interleaved
 * below. Only one set exists in the real file -- the new-side set, per
 * the commit message. Confirm against the applied patch.
 */
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
			    struct ib_ucontext *context)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when the process invoking munmap() holds the
	 * last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to a workqueue.
	 */
	if (ucontext->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

	if (context->closing) {
		if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
			/* Can't take mmap_sem here: defer accounting. */
			INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);
	} else {
		down_write(&uiomr->owning_mm->mmap_sem);
	}
	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
	up_write(&uiomr->owning_mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(uiomr);
	__usnic_uiom_release_tail(uiomr);
}

struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
+1 −2
Original line number Diff line number Diff line
@@ -71,8 +71,7 @@ struct usnic_uiom_reg {
	int				writable;
	struct list_head		chunk_list;
	struct work_struct		work;
	struct mm_struct		*mm;
	unsigned long			diff;
	struct mm_struct		*owning_mm;
};

struct usnic_uiom_chunk {