
Commit edc79b2a authored by Peter Zijlstra, committed by Linus Torvalds

[PATCH] mm: balance dirty pages



Now that we can detect writers of shared mappings, throttle them.  Avoids OOM
by surprise.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d08b3851
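
In short: the write-fault paths stop calling set_page_dirty() directly and instead go through a new helper, set_page_dirty_balance(), which also throttles the task that dirtied the page. Condensed from the hunks below (file names inferred from the hunk contexts: include/linux/writeback.h, mm/memory.c and mm/page-writeback.c), the new call path looks like this; only the lines present in the diff are real, the comments are added for orientation:

/* mm/memory.c, do_wp_page()/do_no_page(): after dropping the PTE lock,
 * a write to a shared file-backed page now goes through the helper. */
	if (dirty_page) {
		set_page_dirty_balance(dirty_page);	/* was: set_page_dirty() */
		put_page(dirty_page);
	}

/* mm/page-writeback.c: mark the page dirty, then throttle the writer
 * if the page really went from clean to dirty. */
void set_page_dirty_balance(struct page *page)
{
	if (set_page_dirty(page)) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}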
+1 −0
@@ -116,6 +116,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 			loff_t pos, loff_t count);
 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 			   loff_t pos, loff_t count);
+void set_page_dirty_balance(struct page *page);
 
 /* pdflush.c */
 extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
+3 −2
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
+#include <linux/writeback.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -1571,7 +1572,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
-		set_page_dirty(dirty_page);
+		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
 	return ret;
@@ -2218,7 +2219,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
-		set_page_dirty(dirty_page);
+		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
 	return ret;
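
These two hunks are the throttle point because, with the parent commit d08b3851 ("mm: tracking shared dirty pages"), shared file-backed mappings are kept write-protected, so every transition of such a page to dirty shows up as a fault handled by do_wp_page() or do_no_page(). Hooking balance_dirty_pages_ratelimited() in via set_page_dirty_balance() means a task dirtying pages through mmap() is now slowed down much like one dirtying them through write(), instead of dirtying memory unchecked until it hits OOM by surprise.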
+10 −0
@@ -244,6 +244,16 @@ static void balance_dirty_pages(struct address_space *mapping)
 		pdflush_operation(background_writeout, 0);
 }
 
+void set_page_dirty_balance(struct page *page)
+{
+	if (set_page_dirty(page)) {
+		struct address_space *mapping = page_mapping(page);
+
+		if (mapping)
+			balance_dirty_pages_ratelimited(mapping);
+	}
+}
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
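
Two details worth noting in the new helper: set_page_dirty() returns nonzero only when it actually moved the page from clean to dirty, so the balancing work is skipped for pages that were already dirty; and the throttle goes through balance_dirty_pages_ratelimited(), so the usual ratelimiting decides how often a full balance_dirty_pages() pass actually runs for this mapping.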