Commit 2a5a03a3 authored by Ira Weiny, committed by Bernhard Thoben

arch/kmap_atomic: consolidate duplicate code



Every arch has the same code to ensure atomic operation (preemption and
pagefaults disabled) and the same check for a !HIGHMEM page.

Remove the duplicate code by defining a core kmap_atomic() which only
calls the arch-specific kmap_atomic_high() when the page is high memory.
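For illustration, the consolidated core helper (a simplified form of the
include/linux/highmem.h hunk below; the fixmap bookkeeping stays per arch
inside kmap_atomic_high()) looks like:

	static inline void *kmap_atomic(struct page *page)
	{
		preempt_disable();
		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);
		return kmap_atomic_high(page);	/* arch-provided highmem mapping */
	}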

[akpm@linux-foundation.org: coding style fixes]
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20200507150004.1423069-7-ira.weiny@intel.com

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 78b6d91ec7bbfc5bcc2dd05bb2cf13c9de1dc7cd)
parent f31ce0b3
@@ -59,7 +59,6 @@ static inline void *kmap_high_get(struct page *page)
  * when CONFIG_HIGHMEM is not set.
  */
 #ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);

@@ -19,18 +19,13 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
 	int type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -64,7 +59,7 @@ void *kmap_atomic(struct page *page)
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {

@@ -53,9 +53,9 @@ extern pte_t *pkmap_page_table;
 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 extern void __kunmap_atomic(void *kvaddr);
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic_high(struct page *page)
 {
-	return kmap_atomic_prot(page, kmap_prot);
+	return kmap_atomic_high_prot(page, kmap_prot);
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)

@@ -44,7 +44,6 @@ extern pte_t *pkmap_page_table;
 #define ARCH_HAS_KMAP_FLUSH_TLB
 extern void kmap_flush_tlb(unsigned long addr);
 
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(void *ptr);

@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>

@@ -16,25 +16,11 @@ void kmap_flush_tlb(unsigned long addr)
 }
 EXPORT_SYMBOL(kmap_flush_tlb);
 
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -46,7 +32,7 @@ void *kmap_atomic(struct page *page)
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {

@@ -61,9 +61,9 @@ extern pte_t *pkmap_page_table;
 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 extern void __kunmap_atomic(void *kvaddr);
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic_high(struct page *page)
 {
-	return kmap_atomic_prot(page, kmap_prot);
+	return kmap_atomic_high_prot(page, kmap_prot);
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)

@@ -23,13 +23,7 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;

@@ -49,7 +49,6 @@ extern void kmap_init(void) __init;
 #define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
 
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 
 #define flush_cache_kmaps()	flush_cache_all()

@@ -48,16 +48,11 @@ void __init kmap_init(void)
 	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
 }
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned long vaddr;
 	long idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -82,7 +77,7 @@ void *kmap_atomic(struct page *page)
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {

@@ -54,8 +54,20 @@ extern unsigned long highstart_pfn, highend_pfn;
 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *kmap_atomic(struct page *page);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_atomic_high_prot(page, prot);
+}
+
+static inline void *kmap_atomic_high(struct page *page)
+{
+	return kmap_atomic_high_prot(page, kmap_prot);
+}
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);

@@ -3,6 +3,7 @@
 #include <linux/swap.h> /* for totalram_pages */
 #include <linux/bootmem.h>
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
  * no global lock is needed and because the kmap code must perform a global TLB
@@ -33,12 +34,6 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-EXPORT_SYMBOL(kmap_atomic);
-
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.

@@ -67,7 +67,6 @@ static inline void flush_cache_kmaps(void)
 	flush_cache_all();
 }
 
-void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 
 void kmap_init(void);

@@ -37,16 +37,11 @@ static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
 		color;
 }
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	idx = kmap_idx(kmap_atomic_idx_push(),
 		       DCACHE_ALIAS(page_to_phys(page)));
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -57,7 +52,7 @@ void *kmap_atomic(struct page *page)
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {

@@ -32,6 +32,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 #include <asm/kmap_types.h>
 
 #ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high(struct page *page);
 #include <asm/highmem.h>
 
 #ifndef ARCH_HAS_KMAP_FLUSH_TLB
@@ -62,6 +63,28 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic(struct page *page)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_atomic_high(page);
+}
+
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
 extern unsigned long totalhigh_pages;
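A typical caller of the consolidated interface looks like the sketch
below (illustrative only, not part of this commit; assumes a valid
struct page *page and a PAGE_SIZE buffer buf):

	/* Briefly map a possibly-highmem page, copy from it, unmap. */
	void *vaddr = kmap_atomic(page);
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);

As the comment block moved into include/linux/highmem.h notes, the
caller must not sleep while the atomic mapping is held.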