Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fc2acab3 authored by Hugh Dickins, committed by Linus Torvalds
Browse files

[PATCH] mm: tlb_finish_mmu forget rss



zap_pte_range has been counting the pages it frees in tlb->freed, then
tlb_finish_mmu has used that to update the mm's rss.  That got stranger when I
added anon_rss, yet updated it by a different route; and stranger when rss and
anon_rss became mm_counters with special access macros.  And it would no
longer be viable if we're relying on page_table_lock to stabilize the
mm_counter, but calling tlb_finish_mmu outside that lock.

Remove the mmu_gather's freed field, let tlb_finish_mmu stick to its own
business, just decrement the rss mm_counter in zap_pte_range (yes, there was
some point to batching the update, and a subsequent patch restores that).  And
forget the excessive paranoia of first reading the counter to avoid going negative
- if rss does go negative, just fix that bug.

Remove the mmu_gather's flushes and avoided_flushes from arm and arm26: no use
was being made of them.  But arm26 alone was actually using the freed field, in
the way some others use need_flush: give it a need_flush.  arm26 seems to prefer
spaces to tabs here: respect that.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4d6ddfa9
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -18,8 +18,7 @@

/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };

void flush_tlb_pending(void)
{
+1 −14
Original line number Diff line number Diff line
@@ -27,11 +27,7 @@
 */
/*
 * Per-CPU TLB tear-down state for the ARM mmu_gather implementation.
 * NOTE(review): this span is a unified diff with the +/- markers stripped,
 * so it interleaves fields the patch removes (freed, flushes,
 * avoided_flushes) with the fields it keeps (mm, fullmm).
 */
struct mmu_gather {
	struct mm_struct	*mm;	/* address space being torn down */
	unsigned int		freed;	/* page count; removed by this patch (rss now updated in zap_pte_range) */
	unsigned int		fullmm;	/* non-zero means full mm flush */

	unsigned int		flushes;	/* flush statistics; removed — no use was being made of them */
	unsigned int		avoided_flushes;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -42,7 +38,6 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->freed = 0;
	tlb->fullmm = full_mm_flush;

	return tlb;
@@ -51,16 +46,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = tlb->mm;
	unsigned long freed = tlb->freed;
	int rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);

	if (tlb->fullmm)
		flush_tlb_mm(mm);
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();
+13 −22
Original line number Diff line number Diff line
@@ -10,11 +10,8 @@
 */
/*
 * Per-CPU TLB tear-down state for the arm26 mmu_gather implementation
 * (spaces, not tabs, per this file's convention).
 * NOTE(review): diff rendering with +/- markers stripped — it interleaves
 * removed fields (freed, flushes, avoided_flushes) with the kept field
 * mm/fullmm and the newly added need_flush.
 */
struct mmu_gather {
        struct mm_struct        *mm;    /* address space being torn down */
        unsigned int            freed;  /* removed by this patch; rss now updated in zap_pte_range */
        unsigned int            need_flush;     /* added: set once a page was really unmapped */
        unsigned int            fullmm; /* non-zero means full mm flush */

        unsigned int            flushes;        /* unused statistics; removed by this patch */
        unsigned int            avoided_flushes;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -25,7 +22,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->freed = 0;
        tlb->need_flush = 0;
        tlb->fullmm = full_mm_flush;

        return tlb;
@@ -34,20 +31,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = tlb->mm;
        unsigned long freed = tlb->freed;
        int rss = get_mm_counter(mm, rss);

        if (rss < freed)
                freed = rss;
        add_mm_counter(mm, rss, -freed);

        if (freed) {
                flush_tlb_mm(mm);
                tlb->flushes++;
        } else {
                tlb->avoided_flushes++;
        }
        if (tlb->need_flush)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();
@@ -65,7 +50,13 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
        } while (0)
#define tlb_end_vma(tlb,vma)                    do { } while (0)

#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
/*
 * Free a page being unmapped and record that the TLB must be flushed.
 * Replaces the old macro form so that need_flush is set, which
 * tlb_finish_mmu tests to decide whether to call flush_tlb_mm.
 */
static inline void
tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;    /* arm26 uses need_flush the way it formerly used freed */
        free_page_and_swap_cache(page);
}

#define pte_free_tlb(tlb,ptep)          pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp)          pmd_free(pmdp)

+0 −9
Original line number Diff line number Diff line
@@ -42,7 +42,6 @@ struct mmu_gather {
	unsigned int		nr;	/* set to ~0U means fast mode */
	unsigned int		need_flush;/* Really unmapped some ptes? */
	unsigned int		fullmm; /* non-zero means full mm flush */
	unsigned long		freed;
	struct page *		pages[FREE_PTE_NR];
};

@@ -63,7 +62,6 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;

	return tlb;
}
@@ -88,13 +86,6 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	int freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	int rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
+0 −9
Original line number Diff line number Diff line
@@ -60,7 +60,6 @@ struct mmu_gather {
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		freed;		/* number of pages freed */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page 		*pages[FREE_PTE_NR];
@@ -147,7 +146,6 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
	 */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;
	tlb->start_addr = ~0UL;
	return tlb;
}
@@ -159,13 +157,6 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned long freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	unsigned long rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
Loading