Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5866d2f8 authored by Thomas Gleixner, committed by Bernhard Thoben
Browse files

highmem: Provide generic variant of kmap_atomic*



The kmap_atomic* interfaces in all architectures are pretty much the same
except for post-map operations (flush) and pre- and post-unmap operations.

Provide a generic variant for that.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linuxfoundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/r/20201103095857.175939340@linutronix.de

(cherry picked from commit 298fa1ad5571f59cb3ca5497a9455f36867f065e)
parent 23a51173
Loading
Loading
Loading
Loading
+70 −18
Original line number Diff line number Diff line
@@ -31,9 +31,16 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)

#include <asm/kmap_types.h>

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
#endif

#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
@@ -81,6 +88,11 @@ static inline void kunmap(struct page *page)
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */

#ifndef CONFIG_KMAP_LOCAL
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
void kunmap_atomic_high(void *kvaddr);

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
@@ -89,7 +101,38 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}
#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)

/*
 * Finish a kmap_atomic() mapping in the !CONFIG_KMAP_LOCAL case by
 * delegating to the architecture's kunmap_atomic_high().  Re-enabling
 * pagefaults and preemption is done by the kunmap_atomic() macro.
 */
static inline void __kunmap_atomic(void *vaddr)
{
	kunmap_atomic_high(vaddr);
}
#else /* !CONFIG_KMAP_LOCAL */

/*
 * Map @page with protection @prot via the generic kmap_local code.
 * Preemption and pagefaults are disabled here and re-enabled by the
 * matching kunmap_atomic().
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

/* Atomically map the page at @pfn using the default kmap protection. */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

/*
 * Tear down a mapping created by the kmap_atomic*() variants above.
 * Pagefault/preemption re-enable happens in the kunmap_atomic() macro.
 */
static inline void __kunmap_atomic(void *addr)
{
	kunmap_local_indexed(addr);
}

#endif /* CONFIG_KMAP_LOCAL */

/* kmap_atomic() is simply kmap_atomic_prot() with the default protection. */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
@@ -137,25 +180,35 @@ static inline void *kmap_atomic(struct page *page)
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void kunmap_atomic_high(void *addr)
/* Without CONFIG_HIGHMEM the protection argument is irrelevant. */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

/* pfn-based variant: resolve the pfn to its struct page and map that. */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(void *addr)
{
	/*
	 * Nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
	 * handles re-enabling faults + preemption
	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
	 * handles re-enabling faults and preemption
	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(addr);
 #endif
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#define kmap_atomic_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

#if !defined(CONFIG_KMAP_LOCAL)
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);
@@ -186,22 +239,21 @@ static inline void kmap_atomic_idx_pop(void)
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)                                     \
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	kunmap_atomic_high(addr);                                  \
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
	pagefault_enable();					\
	preempt_enable();					\
} while (0)


/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+3 −0
Original line number Diff line number Diff line
@@ -623,3 +623,6 @@ config VM_MAX_READAHEAD
	  This sets the VM_MAX_READAHEAD value to allow the readahead window
	  to grow to a maximum size of configured. This will benefit sequential
	  read throughput and thus early boot performance.

config KMAP_LOCAL
	bool
+141 −1
Original line number Diff line number Diff line
@@ -30,9 +30,11 @@
#include <asm/tlbflush.h>


#ifndef CONFIG_KMAP_LOCAL
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif
#endif

/*
 * Virtual_count is not a pure "count".
@@ -307,8 +309,146 @@ void kunmap_high(struct page *page)
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

static DEFINE_PER_CPU(int, __kmap_local_idx);

/*
 * Reserve the next per-CPU kmap slot and return its zero-based index.
 * Slots form a per-CPU stack; exceeding KM_MAX_IDX nesting is fatal.
 * Warns when called from hard interrupt context with interrupts
 * enabled (NOTE(review): presumably because the mapping could then be
 * clobbered by a nested interrupt — confirm against upstream docs).
 */
static inline int kmap_local_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_local_idx) - 1;

	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_MAX_IDX);
	return idx;
}

/* Index of the most recently pushed (still active) kmap slot. */
static inline int kmap_local_idx(void)
{
	return __this_cpu_read(__kmap_local_idx) - 1;
}

/*
 * Release the most recently pushed slot.  A negative result means
 * unbalanced kmap/kunmap, which is fatal.
 */
static inline void kmap_local_idx_pop(void)
{
	int idx = __this_cpu_dec_return(__kmap_local_idx);

	BUG_ON(idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif
#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
/*
 * Fallback when the architecture provides no kmap_high_get() support:
 * never reuse an existing permanent highmem mapping.
 */
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline void kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	/*
	 * Addresses inside the PKMAP window are permanent kmaps handed
	 * out by kmap_high_get(); drop their refcount via kunmap_high().
	 */
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
#endif
}

/*
 * Translate a per-CPU slot index into the global fixmap slot index.
 * Each CPU owns a private, contiguous window of KM_MAX_IDX slots.
 */
static inline int kmap_local_calc_idx(int idx)
{
	int cpu_base = KM_MAX_IDX * smp_processor_id();

	return cpu_base + idx;
}

/* Cached pte pointer for FIX_KMAP_BEGIN, resolved once on first use. */
static pte_t *__kmap_pte;

/*
 * Return the kernel pte for the first fixmap kmap slot.  Individual
 * slots are then addressed as negative offsets from it (kmap_pte - idx),
 * see __kmap_local_pfn_prot() / kunmap_local_indexed().
 */
static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}

/*
 * Establish a temporary, CPU-local fixmap mapping for @pfn with
 * protection @prot and return its kernel virtual address.
 */
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	/*
	 * Keep the slot index, the pte write and the arch post-map hook
	 * on the same CPU.
	 */
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/* A non-none pte means the slot is still in use - fatal. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

/*
 * Map @page with protection @prot and return the kernel virtual
 * address.  Lowmem pages are permanently mapped already; for highmem
 * pages an existing kmap_high_get() mapping is reused when the
 * architecture supports one, otherwise a fresh local fixmap slot is
 * set up.
 */
void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *addr;

	if (!PageHighMem(page))
		return page_address(page);

	/* Prefer an existing permanent mapping if the arch offers one */
	addr = arch_kmap_local_high_get(page);
	if (!addr)
		addr = __kmap_local_pfn_prot(page_to_pfn(page), prot);

	return addr;
}
EXPORT_SYMBOL(__kmap_local_page_prot);

/*
 * Undo a mapping established by __kmap_local_pfn_prot() or
 * __kmap_local_page_prot().
 */
void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte = kmap_get_pte();
	int idx;

	/*
	 * Addresses outside the fixmap kmap window were not mapped here.
	 * The comparison treats FIX_KMAP_END as the low and
	 * FIX_KMAP_BEGIN as the high boundary (fixmap virtual addresses
	 * descend as the fixmap index grows).
	 */
	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		/* Below PAGE_OFFSET it cannot be a kernel mapping at all */
		WARN_ON_ONCE(addr < PAGE_OFFSET);

		/* Handle mappings which were obtained by kmap_high_get() */
		kmap_high_unmap_local(addr);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	/* Unmaps must occur in reverse (stack) order of the maps. */
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte - idx);
	arch_kmap_local_post_unmap(addr);
	kmap_local_idx_pop();
	preempt_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);
#endif

#if defined(HASHED_PAGE_VIRTUAL)