
Commit d08b3851 authored by Peter Zijlstra, committed by Linus Torvalds

[PATCH] mm: tracking shared dirty pages



Tracking of dirty pages in shared writeable mmap()s.

The idea is simple: write-protect clean shared writable pages, catch the
write fault, make the page writable and mark it dirty.  On page write-back,
clean all the PTE dirty bits and write-protect the pages once again.
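
In PTE terms, the two halves of the cycle look roughly as follows (a
condensed sketch using the generic page-table helpers, not a literal
excerpt from the patch):

	/* Fault side (cf. the do_wp_page() hunk below): the clean,
	 * write-protected PTE becomes writable and dirty in one step. */
	entry = maybe_mkwrite(pte_mkdirty(orig_pte), vma);
	set_pte_at(mm, address, page_table, entry);

	/* Write-back side (cf. page_mkclean() below): the PTE is flushed,
	 * cleaned and write-protected, so the next store faults again. */
	entry = ptep_clear_flush(vma, address, page_table);
	entry = pte_wrprotect(pte_mkclean(entry));
	set_pte_at(mm, address, page_table, entry);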

The implementation is a tad harder, mainly because the default
backing_dev_info capabilities were too loosely maintained.  Hence it is not
enough to test the backing_dev_info for cap_account_dirty.

The current heuristic is as follows; a VMA is eligible when (a userspace
example of an eligible mapping follows the list):
 - it is shared writable
    (vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)
 - it is not a 'special' mapping
    (vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) == 0
 - the backing_dev_info is cap_account_dirty
    mapping_cap_account_dirty(vma->vm_file->f_mapping)
 - f_op->mmap() didn't change the default page protection
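
For instance, an ordinary shared file mapping meets all four conditions,
so its first store triggers the write-notification fault (a minimal
userspace sketch, error handling omitted; "data.bin" is a hypothetical
file):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("data.bin", O_RDWR);	/* hypothetical file */
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);	/* VM_WRITE|VM_SHARED */

		p[0] = 1;		 /* write fault: PTE made writable + dirty */
		msync(p, 4096, MS_SYNC); /* write-back: PTE cleaned + wrprotected */
		p[0] = 2;		 /* faults again, page redirtied */

		munmap(p, 4096);
		close(fd);
		return 0;
	}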

Pages from remap_pfn_range() are explicitly excluded because their COW
semantics are already horrid enough (see vm_normal_page() in do_wp_page()) and
because they don't have a backing store anyway.

mprotect() is taught about the new behaviour as well.  However, it overrides
the last condition (a sketch of that change follows).
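
The mm/mprotect.c hunk is not part of this excerpt; schematically,
mprotect_fixup() now picks the protection like this (a hedged sketch,
names per the surrounding kernel code):

	/*
	 * Sketch of the mm/mprotect.c side (hunk not shown in this
	 * excerpt): recompute the protection for the new flags, and
	 * drop the shared bit from the protection_map[] index when
	 * the VMA wants write notification.
	 */
	unsigned long mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
	pgprot_t newprot;

	vma->vm_flags = newflags;
	if (vma_wants_writenotify(vma))
		mask &= ~VM_SHARED;	/* use the private protection */
	newprot = protection_map[newflags & mask];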

Cleaning the pages on write-back is done with page_mkclean(), a new rmap call.
It can be called on any page, but is currently only implemented for mapped
pages; if the page is found to be of a VMA that accounts dirty pages, it will
also write-protect the PTE.
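
Per PTE, the cleaning step amounts to roughly the following (a sketch of
the inner step of the rmap walk; the mm/rmap.c hunk is not included in
this excerpt):

	/*
	 * Hedged sketch of page_mkclean()'s per-PTE work: under the
	 * page-table lock, a dirty or writable PTE is flushed, cleaned
	 * and write-protected in one go.
	 */
	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(pte_mkclean(entry));
		set_pte_at(vma->vm_mm, address, pte, entry);
		ret = 1;
	}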

Finally, in fs/buffer.c:try_to_free_buffers(), move clear_page_dirty() out
from under ->private_lock.  This seems to be safe, since ->private_lock is
used to serialize access to the buffers, not the page itself.  This is needed
because clear_page_dirty() will call into page_mkclean() and would thereby
violate locking order.

[dhowells@redhat.com: Provide a page_mkclean() implementation for NOMMU]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 725d704e
fs/buffer.c  +1 −1
@@ -2987,6 +2987,7 @@ int try_to_free_buffers(struct page *page)
 
 	spin_lock(&mapping->private_lock);
 	ret = drop_buffers(page, &buffers_to_free);
+	spin_unlock(&mapping->private_lock);
 	if (ret) {
 		/*
 		 * If the filesystem writes its buffers by hand (eg ext3)
@@ -2998,7 +2999,6 @@ int try_to_free_buffers(struct page *page)
 		 */
 		clear_page_dirty(page);
 	}
-	spin_unlock(&mapping->private_lock);
 out:
 	if (buffers_to_free) {
 		struct buffer_head *bh = buffers_to_free;
include/linux/mm.h  +34 −0
@@ -15,6 +15,7 @@
 #include <linux/fs.h>
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
+#include <linux/backing-dev.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -810,6 +811,39 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+static inline int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+	unsigned int vm_flags = vma->vm_flags;
+
+	/* If it was private or non-writable, the write bit is already clear */
+	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+		return 0;
+
+	/* The backer wishes to know when pages are first written to? */
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		return 1;
+
+	/* The open routine did something to the protections already? */
+	if (pgprot_val(vma->vm_page_prot) !=
+	    pgprot_val(protection_map[vm_flags &
+		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+		return 0;
+
+	/* Specialty mapping? */
+	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+		return 0;
+
+	/* Can the mapping track the dirty pages? */
+	return vma->vm_file && vma->vm_file->f_mapping &&
+		mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
 
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
include/linux/rmap.h  +14 −0
@@ -103,6 +103,14 @@ pte_t *page_check_address(struct page *, struct mm_struct *,
  */
 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 
+/*
+ * Cleans the PTEs of shared mappings.
+ * (and since clean PTEs should also be readonly, write protects them too)
+ *
+ * returns the number of cleaned PTEs.
+ */
+int page_mkclean(struct page *);
+
 #else	/* !CONFIG_MMU */
 
 #define anon_vma_init()		do {} while (0)
@@ -112,6 +120,12 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 #define page_referenced(page,l) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
+static inline int page_mkclean(struct page *page)
+{
+	return 0;
+}
+
+
 #endif	/* CONFIG_MMU */
 
 /*
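
The write-back callers of page_mkclean() live in mm/page-writeback.c,
whose hunk is not part of this excerpt; roughly, clearing the dirty tag
of a page on a dirty-accounting mapping now also cleans the PTEs (a
hedged sketch):

	/*
	 * Hedged sketch of the mm/page-writeback.c side (not shown in
	 * this excerpt): when a page's dirty state is cleared for
	 * write-back, the PTEs are cleaned and write-protected too,
	 * so the next store redirties the page through a fault.
	 */
	if (mapping_cap_account_dirty(mapping)) {
		page_mkclean(page);
		dec_zone_page_state(page, NR_FILE_DIRTY);
	}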
mm/memory.c  +23 −6
@@ -1458,14 +1458,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse, ret = VM_FAULT_MINOR;
+	int reuse = 0, ret = VM_FAULT_MINOR;
+	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page)
 		goto gotten;
 
-	if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
-				(VM_SHARED|VM_WRITE))) {
+	/*
+	 * Only catch write-faults on shared writable pages, read-only
+	 * shared pages can get COWed by get_user_pages(.write=1, .force=1).
+	 */
+	if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+					(VM_WRITE|VM_SHARED))) {
 		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 			/*
 			 * Notify the address space that the page is about to
@@ -1494,13 +1499,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
 		}
-
+		dirty_page = old_page;
+		get_page(dirty_page);
 		reuse = 1;
 	} else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
 		reuse = can_share_swap_page(old_page);
 		unlock_page(old_page);
-	} else {
-		reuse = 0;
 	}
 
 	if (reuse) {
@@ -1566,6 +1570,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (dirty_page) {
+		set_page_dirty(dirty_page);
+		put_page(dirty_page);
+	}
 	return ret;
 oom:
 	if (old_page)
@@ -2098,6 +2106,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned int sequence = 0;
 	int ret = VM_FAULT_MINOR;
 	int anon = 0;
+	struct page *dirty_page = NULL;
 
 	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2192,6 +2201,10 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
+			if (write_access) {
+				dirty_page = new_page;
+				get_page(dirty_page);
+			}
 		}
 	} else {
 		/* One of our sibling threads was faster, back out. */
@@ -2204,6 +2217,10 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (dirty_page) {
+		set_page_dirty(dirty_page);
+		put_page(dirty_page);
+	}
 	return ret;
 oom:
 	page_cache_release(new_page);
mm/mmap.c  +4 −6
@@ -1105,12 +1105,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 			goto free_vma;
 	}
 
-	/* Don't make the VMA automatically writable if it's shared, but the
-	 * backer wishes to know when pages are first written to */
-	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-		vma->vm_page_prot =
-			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
 	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
 	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
 	 * that memory reservation must be checked; but that reservation
@@ -1128,6 +1122,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	pgoff = vma->vm_pgoff;
 	vm_flags = vma->vm_flags;
 
+	if (vma_wants_writenotify(vma))
+		vma->vm_page_prot =
+			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
 		file = vma->vm_file;