
Commit 70df291b authored by Jason Gunthorpe

mm/mmu_notifiers: do not speculatively allocate a mmu_notifier_mm

A prior commit e0f3c3f7 ("mm/mmu_notifier: init notifier if necessary")
made an attempt at doing this, but had to be reverted as calling
the GFP_KERNEL allocator under the i_mmap_mutex causes deadlock, see
commit 35cfa2b0 ("mm/mmu_notifier: allocate mmu_notifier in advance").

However, we can avoid that problem by doing the allocation only under
the mmap_sem, which is already happening.

Since all writers to mm->mmu_notifier_mm hold the write side of the
mmap_sem, reading it under that sem is deterministic and we can use that
to decide if the allocation path is required, without speculation.

The actual update to mmu_notifier_mm must still be done under the
mm_take_all_locks() to ensure read-side coherency.
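
The scheme boils down to a common pattern: read the pointer under the
outer write lock, do the sleeping allocation before taking the inner
locks, and publish the pointer under the inner locks. Below is a minimal
userspace sketch of that pattern, not kernel code: pthread locks stand in
for the mmap_sem and mm_take_all_locks(), and every name in it
(subscriptions, map_sem, all_locks, register_one) is an invented
illustration.

/*
 * Illustrative sketch only -- not kernel code.  pthread locks stand in
 * for mmap_sem and mm_take_all_locks(); all names are invented.
 */
#include <pthread.h>
#include <stdlib.h>

struct subscriptions {
	int placeholder;	/* stands in for struct mmu_notifier_mm */
};

static pthread_rwlock_t map_sem = PTHREAD_RWLOCK_INITIALIZER;	/* "mmap_sem" */
static pthread_mutex_t all_locks = PTHREAD_MUTEX_INITIALIZER;	/* "mm_take_all_locks()" */
static struct subscriptions *subs;	/* "mm->mmu_notifier_mm" */

/* Caller must hold the write side of map_sem, as in __mmu_notifier_register(). */
static int register_one(void)
{
	struct subscriptions *new_subs = NULL;

	/*
	 * Every writer of subs holds the write side of map_sem, and so
	 * do we, so this read is stable: allocate only when needed.
	 */
	if (!subs) {
		/* A sleeping allocation is safe here: no inner lock held yet. */
		new_subs = malloc(sizeof(*new_subs));
		if (!new_subs)
			return -1;
	}

	/* Publish under the inner lock so readers see a coherent pointer. */
	pthread_mutex_lock(&all_locks);
	if (new_subs)
		subs = new_subs;
	/* ... hang the new registration off subs here ... */
	pthread_mutex_unlock(&all_locks);
	return 0;
}

int main(void)
{
	int ret;

	pthread_rwlock_wrlock(&map_sem);
	ret = register_one();
	pthread_rwlock_unlock(&map_sem);
	return ret ? 1 : 0;
}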

Link: https://lore.kernel.org/r/20190806231548.25242-3-jgg@ziepe.ca
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 56c57103
+22 −12
@@ -242,27 +242,32 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
  */
 int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 {
-	struct mmu_notifier_mm *mmu_notifier_mm;
+	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
 	int ret;
 
 	lockdep_assert_held_write(&mm->mmap_sem);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
-	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-	if (unlikely(!mmu_notifier_mm))
-		return -ENOMEM;
+	if (!mm->mmu_notifier_mm) {
+		/*
+		 * kmalloc cannot be called under mm_take_all_locks(), but we
+		 * know that mm->mmu_notifier_mm can't change while we hold
+		 * the write side of the mmap_sem.
+		 */
+		mmu_notifier_mm =
+			kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+		if (!mmu_notifier_mm)
+			return -ENOMEM;
+
+		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
+		spin_lock_init(&mmu_notifier_mm->lock);
+	}
 
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
 		goto out_clean;
 
-	if (!mm_has_notifiers(mm)) {
-		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
-		spin_lock_init(&mmu_notifier_mm->lock);
-
-		mm->mmu_notifier_mm = mmu_notifier_mm;
-		mmu_notifier_mm = NULL;
-	}
-
 	/* Pairs with the mmdrop in mmu_notifier_unregister_* */
 	mmgrab(mm);
 
 	/*
@@ -273,14 +278,19 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * We can't race against any other mmu notifier method either
 	 * thanks to mm_take_all_locks().
 	 */
+	if (mmu_notifier_mm)
+		mm->mmu_notifier_mm = mmu_notifier_mm;
+
 	spin_lock(&mm->mmu_notifier_mm->lock);
 	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
 	mm_drop_all_locks(mm);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return 0;
 
 out_clean:
 	kfree(mmu_notifier_mm);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_register);
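
For context on the lockdep_assert_held_write() precondition above: the
mmap_sem write side is taken by the public entry point. In kernels of
this vintage mmu_notifier_register() is roughly the following sketch,
shown for orientation only and not part of this commit's diff:

/* Approximate shape of the public wrapper (not part of this commit). */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	/* Take the write side of mmap_sem that the inner helper asserts. */
	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}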