
Commit 0d82044e authored by Linus Torvalds
Pull powerpc fixes from Michael Ellerman:
 "A one-liner to make our Radix MMU support depend on HUGETLB_PAGE. We
  use some of the hugetlb inlines (eg. pud_huge()) when operating on the
  linear mapping and if they're compiled into empty wrappers we can
  corrupt memory.

  Then two fixes to our VFIO IOMMU code. The first is not a regression
  but fixes the locking to avoid a user-triggerable deadlock.

  The second does fix a regression since rc1, and depends on the first
  fix. It makes it possible to run guests with large amounts of memory
  again (~256GB).

  Thanks to Alexey Kardashevskiy"

* tag 'powerpc-5.1-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm_iommu: Allow pinning large regions
  powerpc/mm_iommu: Fix potential deadlock
  powerpc/mm/radix: Make Radix require HUGETLB_PAGE
parents 975a0f40 7a3a4d76
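
Why the HUGETLB_PAGE dependency matters (third patch, the Kconfig.cputype hunk at the bottom): when CONFIG_HUGETLB_PAGE is disabled, include/linux/hugetlb.h defines pmd_huge() and pud_huge() as constant-0 stubs, so a walker of the linear mapping treats every entry as a pointer to a lower-level table, including huge-page leaves, and descends into mapped memory as though it were page-table pages. The skiroot_defconfig hunk directly below keeps that kernel's Radix support enabled, since HUGETLB_PAGE is def_bool HUGETLBFS. A stand-alone sketch of the failure mode follows; the types and helpers are hypothetical stand-ins, not the kernel's real page-table API.

/*
 * Stand-alone sketch of the failure mode; struct pud, pud_huge() and
 * unmap_pud() here are hypothetical stand-ins, not the kernel's API.
 * Build with -DCONFIG_HUGETLB_PAGE to get the correct behaviour.
 */
#include <stdio.h>

#ifdef CONFIG_HUGETLB_PAGE
#define pud_huge(pud)	((pud).is_leaf)	/* real check: huge-page leaf? */
#else
#define pud_huge(pud)	0		/* stub: every entry "is a table" */
#endif

struct pud { int is_leaf; void *table_or_data; };

static void unmap_pud(struct pud pud)
{
	if (pud_huge(pud)) {
		/* leaf entry: just clear the mapping */
		printf("clearing huge leaf mapping\n");
		return;
	}
	/*
	 * With the stub, a huge leaf falls through to here and
	 * table_or_data (mapped memory, not a PMD table) gets walked
	 * and freed as if it were page-table pages: that is the
	 * corruption the Kconfig change prevents.
	 */
	printf("descending into 'PMD table' at %p\n", pud.table_or_data);
}

int main(void)
{
	struct pud huge = { .is_leaf = 1, .table_or_data = (void *)0x1000 };

	unmap_pud(huge);	/* wrong branch unless CONFIG_HUGETLB_PAGE */
	return 0;
}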
arch/powerpc/configs/skiroot_defconfig +1 −0
@@ -266,6 +266,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
arch/powerpc/mm/mmu_context_iommu.c +58 −39
@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 			      unsigned long entries, unsigned long dev_hpa,
 			      struct mm_iommu_table_group_mem_t **pmem)
 {
-	struct mm_iommu_table_group_mem_t *mem;
-	long i, ret, locked_entries = 0;
+	struct mm_iommu_table_group_mem_t *mem, *mem2;
+	long i, ret, locked_entries = 0, pinned = 0;
 	unsigned int pageshift;
-
-	mutex_lock(&mem_list_mutex);
-
-	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
-			next) {
-		/* Overlap? */
-		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
-				(ua < (mem->ua +
-				       (mem->entries << PAGE_SHIFT)))) {
-			ret = -EINVAL;
-			goto unlock_exit;
-		}
-
-	}
+	unsigned long entry, chunk;
 
 	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
 		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 		if (ret)
-			goto unlock_exit;
+			return ret;
 
 		locked_entries = entries;
 	}
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 	down_read(&mm->mmap_sem);
-	ret = get_user_pages_longterm(ua, entries, FOLL_WRITE,
-			mem->hpages, NULL);
+	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+			sizeof(struct vm_area_struct *);
+	chunk = min(chunk, entries);
+	for (entry = 0; entry < entries; entry += chunk) {
+		unsigned long n = min(entries - entry, chunk);
+
+		ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+				FOLL_WRITE, mem->hpages + entry, NULL);
+		if (ret == n) {
+			pinned += n;
+			continue;
+		}
+		if (ret > 0)
+			pinned += ret;
+		break;
+	}
 	up_read(&mm->mmap_sem);
-	if (ret != entries) {
-		/* free the reference taken */
-		for (i = 0; i < ret; i++)
-			put_page(mem->hpages[i]);
-
-		vfree(mem->hpas);
-		kfree(mem);
-		goto unlock_exit;
+	if (pinned != entries) {
+		if (!ret)
+			ret = -EFAULT;
+		goto free_exit;
 	}
 
 	pageshift = PAGE_SHIFT;
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 good_exit:
-	ret = 0;
 	atomic64_set(&mem->mapped, 1);
 	mem->used = 1;
 	mem->ua = ua;
 	mem->entries = entries;
-	*pmem = mem;
 
-	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+	mutex_lock(&mem_list_mutex);
+
+	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+		/* Overlap? */
+		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+				(ua < (mem2->ua +
+				       (mem2->entries << PAGE_SHIFT)))) {
+			ret = -EINVAL;
+			mutex_unlock(&mem_list_mutex);
+			goto free_exit;
+		}
+	}
+
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+
+	mutex_unlock(&mem_list_mutex);
+
+	*pmem = mem;
+
+	return 0;
+
+free_exit:
+	/* free the reference taken */
+	for (i = 0; i < pinned; i++)
+		put_page(mem->hpages[i]);
+
+	vfree(mem->hpas);
+	kfree(mem);
 
 unlock_exit:
-	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
-
-	mutex_unlock(&mem_list_mutex);
+	mm_iommu_adjust_locked_vm(mm, locked_entries, false);
 
 	return ret;
 }
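
The chunked loop above replaces a single get_user_pages_longterm() call because, at this point in history, passing vmas == NULL made get_user_pages_longterm() kcalloc() a temporary array of one struct vm_area_struct pointer per page, and a contiguous kernel allocation cannot exceed 1UL << (PAGE_SHIFT + MAX_ORDER - 1) bytes. chunk is simply the largest page count whose pointer array still fits. A stand-alone illustration of the arithmetic; PAGE_SHIFT = 16 and MAX_ORDER = 9 are assumed ppc64/64K-page values, not taken from the diff.

/*
 * Worked example of the chunk arithmetic above. PAGE_SHIFT and
 * MAX_ORDER are build-time kernel constants; the values below assume
 * a ppc64 kernel with 64K pages (PAGE_SHIFT = 16, MAX_ORDER = 9).
 */
#include <stdio.h>

#define PAGE_SHIFT	16
#define MAX_ORDER	9
#define PTR_SIZE	8	/* sizeof(struct vm_area_struct *) on 64-bit */

int main(void)
{
	/* largest contiguous kmalloc: (MAX_ORDER - 1)-order pages of bytes */
	unsigned long kmalloc_max = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);
	/* how many vma pointers (hence pages) fit in one such allocation */
	unsigned long chunk = kmalloc_max / PTR_SIZE;
	unsigned long region = 256UL << 30;	/* 256 GiB guest RAM */
	unsigned long entries = region >> PAGE_SHIFT;

	printf("kmalloc max: %lu MiB\n", kmalloc_max >> 20);	/* 16 MiB */
	printf("chunk: %lu pages (%lu GiB per call)\n",
	       chunk, (chunk << PAGE_SHIFT) >> 30);	/* 2M pages, 128 GiB */
	printf("calls for 256 GiB: %lu\n",
	       (entries + chunk - 1) / chunk);		/* 2 */
	return 0;
}

Under those assumptions one call covers at most 128 GiB, so pinning the ~256 GiB mentioned in the pull message takes two iterations rather than one 32 MiB kcalloc() that MAX_ORDER forbids.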

@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
-	unsigned long entries, dev_hpa;
+	unsigned long unlock_entries = 0;
 
 	mutex_lock(&mem_list_mutex);

@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 		goto unlock_exit;
 	}
 
+	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+		unlock_entries = mem->entries;
+
 	/* @mapped became 0 so now mappings are disabled, release the region */
-	entries = mem->entries;
-	dev_hpa = mem->dev_hpa;
 	mm_iommu_release(mem);
 
-	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-		mm_iommu_adjust_locked_vm(mm, entries, false);
-
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
+	mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
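
The mm_iommu_put() rework is the locking half of the series: mm_iommu_adjust_locked_vm() takes mmap_sem for writing, so the old code nested mmap_sem inside mem_list_mutex, while other user-reachable paths (per the fix's commit message) could acquire the two in the opposite order. Both patches therefore move pinning and locked_vm accounting outside mem_list_mutex, leaving the mutex to protect only the list walk and insertion. A minimal stand-alone pthreads sketch of the ABBA inversion being avoided; the two threads are illustrative stand-ins, not the actual kernel call paths.

/*
 * Minimal sketch of an ABBA lock inversion. Stand-alone pthreads
 * illustration; the mutex names are stand-ins for mem_list_mutex
 * and mmap_sem, and the two threads are hypothetical call paths.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mem_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;

/* old alloc/put path: list lock first, then mmap_sem (via locked_vm) */
static void *alloc_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mem_list_mutex);
	usleep(1000);				/* widen the race window */
	pthread_mutex_lock(&mmap_sem);		/* B after A */
	printf("alloc path done\n");
	pthread_mutex_unlock(&mmap_sem);
	pthread_mutex_unlock(&mem_list_mutex);
	return NULL;
}

/* opposite-order path: mmap_sem first, then the list lock */
static void *other_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mmap_sem);
	usleep(1000);
	pthread_mutex_lock(&mem_list_mutex);	/* A after B */
	printf("other path done\n");
	pthread_mutex_unlock(&mem_list_mutex);
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, alloc_path, NULL);
	pthread_create(&b, NULL, other_path, NULL);
	pthread_join(a, NULL);	/* likely hangs here: classic ABBA deadlock */
	pthread_join(b, NULL);
	return 0;
}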
arch/powerpc/platforms/Kconfig.cputype +1 −1
@@ -324,7 +324,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK

 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
-	depends on PPC_BOOK3S_64
+	depends on PPC_BOOK3S_64 && HUGETLB_PAGE
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	default y
 	help