Commit ac103e66 authored by Ira Weiny, committed by Bernhard Thoben

arch/kmap: define kmap_atomic_prot() for all arch's



To support kmap_atomic_prot(), all architectures need to support
protections passed to their kmap_atomic_high() function.  Pass protections
into kmap_atomic_high() and change the name to kmap_atomic_high_prot() to
match.

Then define kmap_atomic_prot() as a core function which calls
kmap_atomic_high_prot() when needed.

Finally, redefine kmap_atomic() as a wrapper of kmap_atomic_prot() with
the default kmap_prot exported by the architectures.
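
In effect, the generic side ends up layered as in the short sketch below; this is a condensed view of the include/linux/highmem.h hunk in the diff further down, with the highmem slow path left to each architecture's kmap_atomic_high_prot():

/* Condensed sketch of the generic layering (see the include/linux/highmem.h
 * hunk below for the actual code added by this commit). */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))			/* lowmem is always mapped */
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);	/* per-arch highmem path */
}

/* kmap_atomic() keeps its old behaviour via the arch's default kmap_prot. */
#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
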
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20200507150004.1423069-11-ira.weiny@intel.com

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 20b271dfe9d932b02b067a1f7ba9805c5b8d79bd)
parent 462964ea
@@ -19,7 +19,8 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
-void *kmap_atomic_high(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned int idx;
 	unsigned long vaddr;
@@ -56,10 +57,11 @@ void *kmap_atomic_high(struct page *page)
 	 * with the new mapping.
 	 */
 	set_top_pte(vaddr, mk_pte(page, kmap_prot));
+	//set_fixmap_pte(idx, mk_pte(page, prot));
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_high);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 void kunmap_atomic_high(void *kvaddr)
 {
@@ -16,7 +16,7 @@ void kmap_flush_tlb(unsigned long addr)
 }
 EXPORT_SYMBOL(kmap_flush_tlb);
-void *kmap_atomic_high(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;
@@ -27,12 +27,12 @@ void *kmap_atomic_high(struct page *page)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte - idx)));
 #endif
-	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
 	local_flush_tlb_one((unsigned long)vaddr);
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_high);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 void kunmap_atomic_high(void *kvaddr)
 {
@@ -48,7 +48,7 @@ void __init kmap_init(void)
 	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
 }
-void *kmap_atomic_high(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	long idx, type;
@@ -67,7 +67,7 @@ void *kmap_atomic_high(struct page *page)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
 /* XXX Fix - Anton */
 #if 0
 	__flush_tlb_one(vaddr);
@@ -77,7 +77,7 @@ void *kmap_atomic_high(struct page *page)
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_high);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 void kunmap_atomic_high(void *kvaddr)
 {
@@ -54,20 +54,6 @@ extern unsigned long highstart_pfn, highend_pfn;
 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_atomic_high_prot(page, prot);
-}
-static inline void *kmap_atomic_high(struct page *page)
-{
-	return kmap_atomic_high_prot(page, kmap_prot);
-}
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
@@ -37,7 +37,7 @@ static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
 		color;
 }
-void *kmap_atomic_high(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
@@ -48,11 +48,11 @@ void *kmap_atomic_high(struct page *page)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte + idx)));
 #endif
-	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
+	set_pte(kmap_pte + idx, mk_pte(page, prot));
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_high);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 void kunmap_atomic_high(void *kvaddr)
 {
@@ -32,7 +32,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 #include <asm/kmap_types.h>
 #ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic_high(struct page *page);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
 extern void kunmap_atomic_high(void *kvaddr);
 #include <asm/highmem.h>
@@ -77,14 +77,15 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
-	return kmap_atomic_high(page);
+	return kmap_atomic_high_prot(page, prot);
 }
+#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
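
Illustrative usage of the new entry point (not part of this commit): a hypothetical helper that wants an explicit protection for a short-lived atomic mapping might look like the sketch below. PAGE_KERNEL stands in for whatever pgprot_t the caller actually needs, and kunmap_atomic() pairs with kmap_atomic_prot() just as it does with kmap_atomic().

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical example only -- copies a buffer into a page mapped with an
 * explicit protection instead of the architecture's default kmap_prot. */
static void copy_to_page_with_prot(struct page *page, const void *src,
				   size_t len)
{
	void *vaddr = kmap_atomic_prot(page, PAGE_KERNEL);

	memcpy(vaddr, src, len);
	kunmap_atomic(vaddr);
}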