Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2cb7c9cb authored by David Hildenbrand's avatar David Hildenbrand Committed by Ingo Molnar
Browse files

sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_*



The existing code relies on pagefault_disable() implicitly disabling
preemption, so that no schedule will happen between kmap_atomic() and
kunmap_atomic().

Let's make this explicit, to prepare for pagefault_disable() not
touching preemption anymore.

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b3c395ef
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
	void *kmap;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
	int idx, type;
	struct page *page = pfn_to_page(pfn);

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
+2 −0
Original line number Diff line number Diff line
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
	unsigned long paddr;
	int type;

	preempt_disable();
	pagefault_disable();
	type = kmap_atomic_idx_push();
	paddr = page_to_phys(page);
@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
	}
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
+3 −1
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
	unsigned long vaddr;
	int type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
+3 −1
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		preempt_enable();
		return;
	}

@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
+4 −1
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		preempt_enable();
		return;
	}

@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
Loading