
Commit cd551f97 authored by Hugh Dickins, committed by Linus Torvalds

ksm: distribute remove_mm_from_lists



Do some housekeeping in ksm.c, to help make the next patch easier
to understand: remove the function remove_mm_from_lists, distributing
its code to its callsites scan_get_next_rmap_item and __ksm_exit.

That turns out to be a win in scan_get_next_rmap_item: move its
remove_trailing_rmap_items and cursor advancement up, and it becomes
simpler than before.  __ksm_exit becomes messier, but will change
again; and moving its remove_trailing_rmap_items up lets us strengthen
the unstable tree item's age condition in remove_rmap_item_from_tree.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d952b791
mm/ksm.c: +42 −55
@@ -444,14 +444,9 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		 * But __ksm_exit has to be careful: do the rb_erase
 		 * if it's interrupting a scan, and this rmap_item was
 		 * inserted by this scan rather than left from before.
-		 *
-		 * Because of the case in which remove_mm_from_lists
-		 * increments seqnr before removing rmaps, unstable_nr
-		 * may even be 2 behind seqnr, but should never be
-		 * further behind.  Yes, I did have trouble with this!
 		 */
 		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
-		BUG_ON(age > 2);
+		BUG_ON(age > 1);
 		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);
 		ksm_pages_unshared--;
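
The strengthened assertion leans on modular arithmetic: while an rmap_item
sits in the unstable tree, the low bits of rmap_item->address hold the scan
sequence number from insertion time (the base address is page aligned, so
those bits are free), and the unsigned-char cast reduces the subtraction
modulo 256.  A minimal userspace sketch of that arithmetic; the 0xff mask
and the sample values are illustrative, not lifted from ksm.c:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long seqnr = 0x100;          /* current scan number  */
		unsigned long address = 0xb7400000;   /* page-aligned address */

		/* Item inserted during the previous scan: its low bits
		 * hold (seqnr - 1) mod 256 = 0xff. */
		unsigned long stored = address | ((seqnr - 1) & 0xff);

		/* Same computation as remove_rmap_item_from_tree above:
		 * only the low 8 bits of each operand matter. */
		unsigned char age = (unsigned char)(seqnr - stored);
		assert(age == 1);

		/* After this patch an unstable-tree item is either from the
		 * current scan (age 0, so rb_erase is safe) or from the scan
		 * before (age 1); the age == 2 case existed only because
		 * remove_mm_from_lists could increment seqnr before its
		 * rmap_items were removed. */
		printf("age = %u\n", age);
		return 0;
	}
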
@@ -546,37 +541,6 @@ static int unmerge_and_remove_all_rmap_items(void)
 	return err;
 }
 
-static void remove_mm_from_lists(struct mm_struct *mm)
-{
-	struct mm_slot *mm_slot;
-
-	spin_lock(&ksm_mmlist_lock);
-	mm_slot = get_mm_slot(mm);
-
-	/*
-	 * This mm_slot is always at the scanning cursor when we're
-	 * called from scan_get_next_rmap_item; but it's a special
-	 * case when we're called from __ksm_exit.
-	 */
-	if (ksm_scan.mm_slot == mm_slot) {
-		ksm_scan.mm_slot = list_entry(
-			mm_slot->mm_list.next, struct mm_slot, mm_list);
-		ksm_scan.address = 0;
-		ksm_scan.rmap_item = list_entry(
-			&ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
-		if (ksm_scan.mm_slot == &ksm_mm_head)
-			ksm_scan.seqnr++;
-	}
-
-	hlist_del(&mm_slot->link);
-	list_del(&mm_slot->mm_list);
-	spin_unlock(&ksm_mmlist_lock);
-
-	remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
-	free_mm_slot(mm_slot);
-	clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-}
-
 static u32 calc_checksum(struct page *page)
 {
 	u32 checksum;
@@ -1241,33 +1205,31 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
 		}
 	}
 
-	if (!ksm_scan.address) {
-		/*
-		 * We've completed a full scan of all vmas, holding mmap_sem
-		 * throughout, and found no VM_MERGEABLE: so do the same as
-		 * __ksm_exit does to remove this mm from all our lists now.
-		 */
-		remove_mm_from_lists(mm);
-		up_read(&mm->mmap_sem);
-		slot = ksm_scan.mm_slot;
-		if (slot != &ksm_mm_head)
-			goto next_mm;
-		return NULL;
-	}
 
 	/*
 	 * Nuke all the rmap_items that are above this current rmap:
 	 * because there were no VM_MERGEABLE vmas with such addresses.
 	 */
 	remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
-	up_read(&mm->mmap_sem);
 
 	spin_lock(&ksm_mmlist_lock);
-	slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
-	ksm_scan.mm_slot = slot;
+	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
+						struct mm_slot, mm_list);
+	if (ksm_scan.address == 0) {
+		/*
+		 * We've completed a full scan of all vmas, holding mmap_sem
+		 * throughout, and found no VM_MERGEABLE: so do the same as
+		 * __ksm_exit does to remove this mm from all our lists now.
+		 */
+		hlist_del(&slot->link);
+		list_del(&slot->mm_list);
+		free_mm_slot(slot);
+		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+	}
 	spin_unlock(&ksm_mmlist_lock);
+	up_read(&mm->mmap_sem);
 
 	/* Repeat until we've completed scanning the whole list */
 	slot = ksm_scan.mm_slot;
 	if (slot != &ksm_mm_head)
 		goto next_mm;
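
The tail of this hunk is ksmd's cursor advance: mm_slots live on a circular
list headed by ksm_mm_head, and coming back around to the head means a full
scan pass is done, at which point seqnr is bumped so surviving unstable-tree
items age.  A toy model of that control flow; simplified userspace code,
with names echoing ksm.c but none of the locking:

	#include <stdio.h>

	struct mm_slot { struct mm_slot *next; const char *name; };

	static struct mm_slot ksm_mm_head;   /* list head acts as sentinel */
	static struct mm_slot slot_a, slot_b;

	int main(void)
	{
		unsigned long seqnr = 0;
		struct mm_slot *cursor;

		/* circular list: head -> a -> b -> head */
		ksm_mm_head.next = &slot_a;   ksm_mm_head.name = "head";
		slot_a.next = &slot_b;        slot_a.name = "a";
		slot_b.next = &ksm_mm_head;   slot_b.name = "b";

		cursor = ksm_mm_head.next;
		for (int step = 0; step < 5; step++) {
			printf("scan %lu: slot %s\n", seqnr, cursor->name);
			cursor = cursor->next;
			if (cursor == &ksm_mm_head) { /* full pass complete */
				seqnr++;              /* ages unstable items */
				cursor = cursor->next;
			}
		}
		return 0;
	}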

@@ -1408,13 +1370,38 @@ int __ksm_enter(struct mm_struct *mm)
 
 void __ksm_exit(struct mm_struct *mm)
 {
+	struct mm_slot *mm_slot;
+
 	/*
 	 * This process is exiting: doesn't hold and doesn't need mmap_sem;
 	 * but we do need to exclude ksmd and other exiters while we modify
 	 * the various lists and trees.
 	 */
 	mutex_lock(&ksm_thread_mutex);
-	remove_mm_from_lists(mm);
+	spin_lock(&ksm_mmlist_lock);
+	mm_slot = get_mm_slot(mm);
+	if (!list_empty(&mm_slot->rmap_list)) {
+		spin_unlock(&ksm_mmlist_lock);
+		remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
+		spin_lock(&ksm_mmlist_lock);
+	}
+
+	if (ksm_scan.mm_slot == mm_slot) {
+		ksm_scan.mm_slot = list_entry(
+			mm_slot->mm_list.next, struct mm_slot, mm_list);
+		ksm_scan.address = 0;
+		ksm_scan.rmap_item = list_entry(
+			&ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
+		if (ksm_scan.mm_slot == &ksm_mm_head)
+			ksm_scan.seqnr++;
+	}
+
+	hlist_del(&mm_slot->link);
+	list_del(&mm_slot->mm_list);
+	spin_unlock(&ksm_mmlist_lock);
+
+	free_mm_slot(mm_slot);
+	clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 	mutex_unlock(&ksm_thread_mutex);
 }
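
One idiom in the inlined cursor reset deserves a note: ksm_scan.rmap_item is
set with list_entry(&ksm_scan.mm_slot->rmap_list, struct rmap_item, link),
i.e. container_of applied to the list head itself.  That manufactures a
sentinel "item" whose embedded link is the head, so advancing via
rmap_item->link.next lands on the first real rmap_item with no
first-iteration special case; only the link field of such a sentinel may
ever be touched.  A standalone sketch of the trick, with simplified types:

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rmap_item {
		unsigned long address;
		struct list_head link;
	};
	struct mm_slot { struct list_head rmap_list; };

	int main(void)
	{
		struct mm_slot slot;
		struct rmap_item first = { .address = 0x1000 };

		/* circular list: rmap_list <-> first.link */
		slot.rmap_list.next = &first.link;
		slot.rmap_list.prev = &first.link;
		first.link.next = &slot.rmap_list;
		first.link.prev = &slot.rmap_list;

		/* Pretend the head is an rmap_item: its ->link is the head
		 * itself; its ->address is garbage and must not be read. */
		struct rmap_item *cursor =
			list_entry(&slot.rmap_list, struct rmap_item, link);

		struct rmap_item *item = list_entry(cursor->link.next,
						struct rmap_item, link);
		printf("first item at %#lx\n", item->address);
		return 0;
	}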