
Commit 00a9730e authored by Guo Ren

csky: Cache and TLB routines



This patch adds the cache and TLB sync code for abiv1 & abiv2.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
parent 4859bfca
arch/csky/abiv1/cacheflush.c +52 −0
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_arch_1, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	dcache_wb_range(addr, addr + PAGE_SIZE);
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr;
	struct page *page;
	unsigned long pfn;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	addr = (unsigned long) page_address(page);

	if (vma->vm_flags & VM_EXEC ||
	    pages_do_alias(addr, address & PAGE_MASK))
		cache_wbinv_all();

	clear_bit(PG_arch_1, &(page)->flags);
}
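
Purely as an illustration (not part of this commit), a minimal sketch of how a caller is expected to use flush_dcache_page(); the fill_pagecache_page() helper and its parameters are assumed names:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper: write into a page-cache page through its kernel
 * mapping, then tell the arch code the kernel dirtied the page.  On
 * abiv1, flush_dcache_page() either writes the dcache back right away
 * or, for a not-yet-mapped page-cache page, only sets PG_arch_1 so the
 * flush is deferred to update_mmu_cache() on the next fault.
 */
static void fill_pagecache_page(struct page *page, const void *src,
				unsigned int offset, unsigned int len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr + offset, src, len);
	kunmap_atomic(kaddr);

	flush_dcache_page(page);
}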
arch/csky/abiv1/inc/abi/cacheflush.h +49 −0
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/compiler.h>
#include <asm/string.h>
#include <asm/cache.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_cache_mm(mm)			cache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

/*
 * If current_mm != vma->mm, cache_wbinv_range(start, end) would be broken.
 * Use cache_wbinv_all() here; this needs to be improved in the future.
 */
#define flush_cache_range(vma, start, end)	cache_wbinv_all()
#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)

#define flush_icache_page(vma, page)		cache_wbinv_all()
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

#define flush_icache_user_range(vma, pg, adr, len) \
				cache_wbinv_range(adr, adr + len)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	cache_wbinv_all(); \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	cache_wbinv_all(); \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#define flush_dcache_mmap_lock(mapping)		do {} while (0)
#define flush_dcache_mmap_unlock(mapping)	do {} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
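
As a usage illustration (not part of the commit), copy_to_user_page() above is what backs ptrace-style writes into another task's memory. A simplified, hypothetical caller following the generic access_process_vm() pattern; poke_text() and maddr are assumed names:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical caller, simplified from the generic access_process_vm()
 * flow.  On abiv1, copy_to_user_page() expands to cache_wbinv_all();
 * memcpy(); cache_wbinv_all(); which keeps the aliasing VIPT caches
 * coherent with the target task's user mapping of the page.
 */
static void poke_text(struct vm_area_struct *vma, struct page *page,
		      unsigned long vaddr, const void *src, int len)
{
	void *maddr = kmap(page);

	copy_to_user_page(vma, page, vaddr,
			  maddr + (vaddr & ~PAGE_MASK), src, len);

	set_page_dirty_lock(page);
	kunmap(page);
}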
arch/csky/abiv2/cacheflush.c +60 −0
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start;

	start = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(start, start + PAGE_SIZE);

	kunmap_atomic((void *)start);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, int len)
{
	unsigned long kaddr;

	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);

	cache_wbinv_range(kaddr, kaddr + len);

	kunmap_atomic((void *)kaddr);
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr, pfn;
	struct page *page;
	void *va;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	va = page_address(page);
	addr = (unsigned long) va;

	if (va == NULL && PageHighMem(page))
		addr = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(addr, addr + PAGE_SIZE);

	if (va == NULL && PageHighMem(page))
		kunmap_atomic((void *) addr);
}
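
For orientation, a hedged sketch of where the generic fault path invokes update_mmu_cache(), simplified from mm/memory.c; the local names are assumptions:

#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical, simplified fault-completion step: the generic MM code
 * installs the new PTE and then lets the architecture synchronize its
 * caches for that translation.  On abiv2, update_mmu_cache() only acts
 * on executable mappings (the dcache is physically indexed), while the
 * abiv1 version earlier in this commit also handles dcache aliasing.
 */
static void finish_fault_sketch(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep,
				pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);
	update_mmu_cache(vma, address, ptep);
}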
arch/csky/abiv2/inc/abi/cacheflush.h +46 −0
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>

/*
 * The cache doesn't need to be flushed when TLB entries change, because
 * the cache is indexed by physical addresses, not virtual addresses.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)

#define flush_cache_range(vma, start, end) \
	do { \
		if (vma->vm_flags & VM_EXEC) \
			icache_inv_all(); \
	} while (0)

#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

void flush_icache_page(struct vm_area_struct *vma, struct page *page);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, int len);

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
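
A hypothetical example (not part of this commit) of the flush_icache_range() contract that the macro above implements via cache_wbinv_range(); patch_kernel_insn() and patch_site are assumed names, and real instruction patching must also handle write protection and cross-CPU synchronization:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * After the kernel rewrites one of its own instructions, the new bytes
 * must be written back from the dcache and the stale icache line
 * invalidated before the code is executed.  On abiv2 this maps to
 * cache_wbinv_range() over the patched range.
 */
static void patch_kernel_insn(void *patch_site, u32 insn)
{
	memcpy(patch_site, &insn, sizeof(insn));

	flush_icache_range((unsigned long)patch_site,
			   (unsigned long)patch_site + sizeof(insn));
}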
arch/csky/include/asm/barrier.h +49 −0
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_BARRIER_H
#define __ASM_CSKY_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()	asm volatile ("nop\n":::"memory")

/*
 * sync:        completion barrier
 * sync.s:      completion barrier and shareable to other cores
 * sync.i:      completion barrier with flush cpu pipeline
 * sync.is:     completion barrier with flush cpu pipeline and shareable to
 *		other cores
 *
 * bar.brwarw:  ordering barrier for all load/store instructions before it
 * bar.brwarws: ordering barrier for all load/store instructions before it
 *						and shareable to other cores
 * bar.brar:    ordering barrier for all load       instructions before it
 * bar.brars:   ordering barrier for all load       instructions before it
 *						and shareable to other cores
 * bar.bwaw:    ordering barrier for all store      instructions before it
 * bar.bwaws:   ordering barrier for all store      instructions before it
 *						and shareable to other cores
 */

#ifdef CONFIG_CPU_HAS_CACHEV2
#define mb()		asm volatile ("bar.brwarw\n":::"memory")
#define rmb()		asm volatile ("bar.brar\n":::"memory")
#define wmb()		asm volatile ("bar.bwaw\n":::"memory")

#ifdef CONFIG_SMP
#define __smp_mb()	asm volatile ("bar.brwarws\n":::"memory")
#define __smp_rmb()	asm volatile ("bar.brars\n":::"memory")
#define __smp_wmb()	asm volatile ("bar.bwaws\n":::"memory")
#endif /* CONFIG_SMP */

#define sync_is()	asm volatile ("sync.is\n":::"memory")

#else /* !CONFIG_CPU_HAS_CACHEV2 */
#define mb()		asm volatile ("sync\n":::"memory")
#endif

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_BARRIER_H */
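
To show what the SMP ordering barriers above are for, a minimal, hypothetical producer/consumer pairing on a CACHEV2 SMP configuration; data and ready are assumed globals, not part of this commit:

#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/processor.h>

static int data;
static int ready;

/* Publish data, then the flag; smp_wmb() (bar.bwaws) orders the stores. */
static void producer(int value)
{
	data = value;
	smp_wmb();
	WRITE_ONCE(ready, 1);
}

/* Wait for the flag, then read data; smp_rmb() (bar.brars) orders the loads. */
static int consumer(void)
{
	while (!READ_ONCE(ready))
		cpu_relax();
	smp_rmb();
	return data;
}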