Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3e4d3af5 authored by Peter Zijlstra, committed by Linus Torvalds
Browse files

mm: stack based kmap_atomic()



Keep the current interface but ignore the KM_type and use a stack based
approach.

The advantage is that we get rid of crappy code like:

	#define __KM_PTE			\
		(in_nmi() ? KM_NMI_PTE : 	\
		 in_irq() ? KM_IRQ_PTE :	\
		 KM_PTE0)

and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

  #define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.

[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 61ecdb80
Loading
Loading
Loading
Loading
+3 −3
Original line number Original line Diff line number Diff line
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void *__kmap_atomic(struct page *page);
extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif
#endif


+14 −9
Original line number Original line Diff line number Diff line
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
}
}
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kunmap);


void *kmap_atomic(struct page *page, enum km_type type)
void *__kmap_atomic(struct page *page)
{
{
	unsigned int idx;
	unsigned int idx;
	unsigned long vaddr;
	unsigned long vaddr;
	void *kmap;
	void *kmap;
	int type;


	pagefault_disable();
	pagefault_disable();
	if (!PageHighMem(page))
	if (!PageHighMem(page))
		return page_address(page);
		return page_address(page);


	debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
	if (kmap)
	if (kmap)
		return kmap;
		return kmap;


	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)


	return (void *)vaddr;
	return (void *)vaddr;
}
}
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(__kmap_atomic);


void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
void __kunmap_atomic(void *kvaddr)
{
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
	int idx, type;


	if (kvaddr >= (void *)FIXADDR_START) {
	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx_pop();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
	}
	}
	pagefault_enable();
	pagefault_enable();
}
}
EXPORT_SYMBOL(kunmap_atomic_notypecheck);
EXPORT_SYMBOL(__kunmap_atomic);


void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
void *kmap_atomic_pfn(unsigned long pfn)
{
{
	unsigned int idx;
	unsigned long vaddr;
	unsigned long vaddr;
	int idx, type;


	pagefault_disable();
	pagefault_disable();


	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
#ifdef CONFIG_DEBUG_HIGHMEM
+5 −20
Original line number Original line Diff line number Diff line
@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
	(void *) damlr;										  \
	(void *) damlr;										  \
})
})


static inline void *kmap_atomic(struct page *page, enum km_type type)
static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
{
{
	unsigned long paddr;
	unsigned long paddr;


	pagefault_disable();
	pagefault_disable();
	debug_kmap_atomic(type);
	paddr = page_to_phys(page);
	paddr = page_to_phys(page);


	switch (type) {
	switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
        case 1:		return __kmap_atomic_primary(1, paddr, 3);
        case 1:		return __kmap_atomic_primary(1, paddr, 3);
        case 2:		return __kmap_atomic_primary(2, paddr, 4);
        case 2:		return __kmap_atomic_primary(2, paddr, 4);
        case 3:		return __kmap_atomic_primary(3, paddr, 5);
        case 3:		return __kmap_atomic_primary(3, paddr, 5);
        case 4:		return __kmap_atomic_primary(4, paddr, 6);
        case 5:		return __kmap_atomic_primary(5, paddr, 7);
        case 6:		return __kmap_atomic_primary(6, paddr, 8);
        case 7:		return __kmap_atomic_primary(7, paddr, 9);
        case 8:		return __kmap_atomic_primary(8, paddr, 10);

	case 9 ... 9 + NR_TLB_LINES - 1:
		return __kmap_atomic_secondary(type - 9, paddr);


	default:
	default:
		BUG();
		BUG();
@@ -152,22 +143,13 @@ do { \
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
} while(0)
} while(0)


static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
{
{
	switch (type) {
	switch (type) {
        case 0:		__kunmap_atomic_primary(0, 2);	break;
        case 0:		__kunmap_atomic_primary(0, 2);	break;
        case 1:		__kunmap_atomic_primary(1, 3);	break;
        case 1:		__kunmap_atomic_primary(1, 3);	break;
        case 2:		__kunmap_atomic_primary(2, 4);	break;
        case 2:		__kunmap_atomic_primary(2, 4);	break;
        case 3:		__kunmap_atomic_primary(3, 5);	break;
        case 3:		__kunmap_atomic_primary(3, 5);	break;
        case 4:		__kunmap_atomic_primary(4, 6);	break;
        case 5:		__kunmap_atomic_primary(5, 7);	break;
        case 6:		__kunmap_atomic_primary(6, 8);	break;
        case 7:		__kunmap_atomic_primary(7, 9);	break;
        case 8:		__kunmap_atomic_primary(8, 10);	break;

	case 9 ... 9 + NR_TLB_LINES - 1:
		__kunmap_atomic_secondary(type - 9, kvaddr);
		break;


	default:
	default:
		BUG();
		BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
	pagefault_enable();
	pagefault_enable();
}
}


void *__kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);

#endif /* !__ASSEMBLY__ */
#endif /* !__ASSEMBLY__ */


#endif /* __KERNEL__ */
#endif /* __KERNEL__ */
+2 −2
Original line number Original line Diff line number Diff line
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	dampr2 = __get_DAMPR(2);
	dampr2 = __get_DAMPR(2);


	for (i = 0; i < nents; i++) {
	for (i = 0; i < nents; i++) {
		vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
		vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);


		frv_dcache_writeback((unsigned long) vaddr,
		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);
				     (unsigned long) vaddr + PAGE_SIZE);


	}
	}


	kunmap_atomic(vaddr, __KM_CACHE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);
	if (dampr2) {
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
+4 −4
Original line number Original line Diff line number Diff line
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)


	dampr2 = __get_DAMPR(2);
	dampr2 = __get_DAMPR(2);


	vaddr = kmap_atomic(page, __KM_CACHE);
	vaddr = kmap_atomic_primary(page, __KM_CACHE);


	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);


	kunmap_atomic(vaddr, __KM_CACHE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);


	if (dampr2) {
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,


	dampr2 = __get_DAMPR(2);
	dampr2 = __get_DAMPR(2);


	vaddr = kmap_atomic(page, __KM_CACHE);
	vaddr = kmap_atomic_primary(page, __KM_CACHE);


	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
	frv_cache_wback_inv(start, start + len);
	frv_cache_wback_inv(start, start + len);


	kunmap_atomic(vaddr, __KM_CACHE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);


	if (dampr2) {
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_DAMPR(2, dampr2);
Loading