
Commit 1cf35d47 authored by Linus Torvalds

mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts



The mmu-gather operation 'tlb_flush_mmu()' has done two things: the
actual tlb flush operation, and the batched freeing of the pages that
the TLB entries pointed at.

This splits the operation into separate phases, so that the forced
batched flushing done by zap_pte_range() can now do the actual TLB flush
while still holding the page table lock, but delay the batched freeing
of all the pages to after the lock has been dropped.
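
In caller terms the new ordering looks roughly like the sketch below (a simplified illustration of the zap_pte_range()-style pattern, not the literal mm/memory.c change; zap_range_sketch() and the elided loop body are placeholders, while tlb_flush_mmu_tlbonly()/tlb_flush_mmu_free() are the helpers introduced in the diff):

/* Simplified sketch of the caller-side pattern, not the literal kernel code. */
static void zap_range_sketch(struct mmu_gather *tlb, struct mm_struct *mm,
			     pmd_t *pmd, unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *start_pte, *pte;

	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	do {
		/* clear each pte and batch its page for later freeing */
	} while (pte++, addr += PAGE_SIZE, addr != end);

	/* Phase 1: flush the stale TLB entries while the pte lock is still held */
	tlb_flush_mmu_tlbonly(tlb);

	pte_unmap_unlock(start_pte, ptl);

	/* Phase 2: free the batched pages only after the lock has been dropped */
	tlb_flush_mmu_free(tlb);
}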

This in turn allows us to avoid a race condition between
set_page_dirty() (as called by zap_pte_range() when it finds a dirty
shared memory pte) and page_mkclean(): because we now flush all the
dirty page data from the TLBs while holding the pte lock,
page_mkclean() will be held up walking the (recently cleaned) page
tables until after the TLB entries have been flushed from all CPUs.
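
For reference, page_mkclean() serializes against this on the same pte lock; roughly as in the sketch below (an illustrative sketch, not the literal mm/rmap.c code, with page_mkclean_one_sketch() as a made-up name):

/* Illustrative sketch of the per-mapping cleaning step, not the literal rmap code. */
static int page_mkclean_one_sketch(struct vm_area_struct *vma,
				   unsigned long address, pte_t *pte,
				   spinlock_t *ptl)
{
	int cleaned = 0;

	spin_lock(ptl);		/* the same pte lock zap_pte_range() now flushes under */
	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(vma->vm_mm, address, pte, entry);
		cleaned = 1;
	}
	spin_unlock(ptl);

	/*
	 * Because zap_pte_range() flushes before releasing the lock, once we
	 * get here no CPU can still be dirtying the page through a stale,
	 * writable TLB entry.
	 */
	return cleaned;
}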

Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Dave Hansen <dave.hansen@intel.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9a60ee11
+11 −1
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
+32 −10
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val) 	((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i;
-	unsigned int nr;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 
 	if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
 	}
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+12 −1
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
 
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
+8 −0
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
+14 −2
@@ -58,14 +58,26 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
+static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	init_tlb_gather(tlb);
+}
+
 static inline void
 tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
 
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-	init_tlb_gather(tlb);
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu