Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ac83d0ff authored by Alex Deucher
Browse files

Revert "drm/amdgpu: support userptr cross VMAs case with HMM"



This reverts commit 5aeaccca.

This depends on an HMM fix which is not upstream yet.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b523c3a6
+35 −91
Original line number Original line Diff line number Diff line
@@ -711,8 +711,7 @@ struct amdgpu_ttm_tt {
	struct task_struct	*usertask;
	struct task_struct	*usertask;
	uint32_t		userflags;
	uint32_t		userflags;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*ranges;
	struct hmm_range	range;
	int			nr_ranges;
#endif
#endif
};
};


@@ -724,108 +723,62 @@ struct amdgpu_ttm_tt {
 * once afterwards to stop HMM tracking
 * once afterwards to stop HMM tracking
 */
 */
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)

/* Support Userptr pages cross max 16 vmas */
#define MAX_NR_VMAS	(16)

int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct mm_struct *mm = gtt->usertask->mm;
	struct mm_struct *mm = gtt->usertask->mm;
	unsigned long start = gtt->userptr;
	unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
	unsigned long end = start + ttm->num_pages * PAGE_SIZE;
	struct hmm_range *range = &gtt->range;
	struct hmm_range *ranges;
	int r = 0, i;
	struct vm_area_struct *vma = NULL, *vmas[MAX_NR_VMAS];
	uint64_t *pfns, f;
	int r = 0, i, nr_pages;


	if (!mm) /* Happens during process shutdown */
	if (!mm) /* Happens during process shutdown */
		return -ESRCH;
		return -ESRCH;


	down_read(&mm->mmap_sem);
	amdgpu_hmm_init_range(range);

	/* user pages may cross multiple VMAs */
	gtt->nr_ranges = 0;
	do {
		unsigned long vm_start;


		if (gtt->nr_ranges >= MAX_NR_VMAS) {
	down_read(&mm->mmap_sem);
			DRM_ERROR("Too many VMAs in userptr range\n");
			r = -EFAULT;
			goto out;
		}


		vm_start = vma ? vma->vm_end : start;
	range->vma = find_vma(mm, gtt->userptr);
		vma = find_vma(mm, vm_start);
	if (!range_in_vma(range->vma, gtt->userptr, end))
		if (unlikely(!vma || vm_start < vma->vm_start)) {
		r = -EFAULT;
		r = -EFAULT;
			goto out;
	else if ((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		}
		range->vma->vm_file)
		vmas[gtt->nr_ranges++] = vma;
	} while (end > vma->vm_end);

	DRM_DEBUG_DRIVER("0x%lx nr_ranges %d pages 0x%lx\n",
		start, gtt->nr_ranges, ttm->num_pages);

	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vmas[0]->vm_file)) {
		r = -EPERM;
		r = -EPERM;
	if (r)
		goto out;
		goto out;
	}


	ranges = kvmalloc_array(gtt->nr_ranges, sizeof(*ranges), GFP_KERNEL);
	range->pfns = kvmalloc_array(ttm->num_pages, sizeof(uint64_t),
	if (unlikely(!ranges)) {
				     GFP_KERNEL);
	if (range->pfns == NULL) {
		r = -ENOMEM;
		r = -ENOMEM;
		goto out;
		goto out;
	}
	}
	range->start = gtt->userptr;
	range->end = end;


	pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
	range->pfns[0] = range->flags[HMM_PFN_VALID];
	if (unlikely(!pfns)) {
	range->pfns[0] |= amdgpu_ttm_tt_is_readonly(ttm) ?
		r = -ENOMEM;
				0 : range->flags[HMM_PFN_WRITE];
		goto out_free_ranges;
	for (i = 1; i < ttm->num_pages; i++)
	}
		range->pfns[i] = range->pfns[0];

	for (i = 0; i < gtt->nr_ranges; i++)
		amdgpu_hmm_init_range(&ranges[i]);

	f = ranges[0].flags[HMM_PFN_VALID];
	f |= amdgpu_ttm_tt_is_readonly(ttm) ?
				0 : ranges[0].flags[HMM_PFN_WRITE];
	memset64(pfns, f, ttm->num_pages);

	for (nr_pages = 0, i = 0; i < gtt->nr_ranges; i++) {
		ranges[i].vma = vmas[i];
		ranges[i].start = max(start, vmas[i]->vm_start);
		ranges[i].end = min(end, vmas[i]->vm_end);
		ranges[i].pfns = pfns + nr_pages;
		nr_pages += (ranges[i].end - ranges[i].start) / PAGE_SIZE;

		r = hmm_vma_fault(&ranges[i], true);
		if (unlikely(r))
			break;
	}
	if (unlikely(r)) {
		while (i--)
			hmm_vma_range_done(&ranges[i]);


	/* This may trigger page table update */
	r = hmm_vma_fault(range, true);
	if (r)
		goto out_free_pfns;
		goto out_free_pfns;
	}


	up_read(&mm->mmap_sem);
	up_read(&mm->mmap_sem);


	for (i = 0; i < ttm->num_pages; i++)
	for (i = 0; i < ttm->num_pages; i++)
		pages[i] = hmm_pfn_to_page(&ranges[0], pfns[i]);
		pages[i] = hmm_pfn_to_page(range, range->pfns[i]);
	gtt->ranges = ranges;


	return 0;
	return 0;


out_free_pfns:
out_free_pfns:
	kvfree(pfns);
	kvfree(range->pfns);
out_free_ranges:
	range->pfns = NULL;
	kvfree(ranges);
out:
out:
	up_read(&mm->mmap_sem);
	up_read(&mm->mmap_sem);

	return r;
	return r;
}
}


@@ -839,23 +792,15 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;
	bool r = false;
	int i;


	if (!gtt || !gtt->userptr)
	if (!gtt || !gtt->userptr)
		return false;
		return false;


	DRM_DEBUG_DRIVER("user_pages_done 0x%llx nr_ranges %d pages 0x%lx\n",
	WARN_ONCE(!gtt->range.pfns, "No user pages to check\n");
		gtt->userptr, gtt->nr_ranges, ttm->num_pages);
	if (gtt->range.pfns) {

		r = hmm_vma_range_done(&gtt->range);
	WARN_ONCE(!gtt->ranges || !gtt->ranges[0].pfns,
		kvfree(gtt->range.pfns);
		"No user pages to check\n");
		gtt->range.pfns = NULL;

	if (gtt->ranges) {
		for (i = 0; i < gtt->nr_ranges; i++)
			r |= hmm_vma_range_done(&gtt->ranges[i]);
		kvfree(gtt->ranges[0].pfns);
		kvfree(gtt->ranges);
		gtt->ranges = NULL;
	}
	}


	return r;
	return r;
@@ -939,9 +884,8 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
	sg_free_table(ttm->sg);
	sg_free_table(ttm->sg);


#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->ranges &&
	if (gtt->range.pfns &&
	    ttm->pages[0] == hmm_pfn_to_page(&gtt->ranges[0],
	    ttm->pages[0] == hmm_pfn_to_page(&gtt->range, gtt->range.pfns[0]))
					     gtt->ranges[0].pfns[0]))
		WARN_ONCE(1, "Missing get_user_page_done\n");
		WARN_ONCE(1, "Missing get_user_page_done\n");
#endif
#endif
}
}