
Commit 80b29b6b authored by Linus Torvalds

Merge tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:
 "This round of csky subsystem just some fixups:

   - Fix mb() synchronization problem

   - Fix dma_alloc_coherent with PAGE_SO attribute

   - Fix cache_op failed when cross memory ZONEs

   - Optimize arch_sync_dma_for_cpu/device with dma_inv_range

   - Fix ioremap function losing

   - Fix arch_get_unmapped_area() implementation

   - Fix defer cache flush for 610

   - Support kernel non-aligned access

   - Fix 610 vipt cache flush mechanism

   - Fix add zero_fp fixup perf backtrace panic

   - Move static keyword to the front of declaration

   - Fix csky_pmu.max_period assignment

   - Use generic free_initrd_mem()

   - entry: Remove unneeded need_resched() loop"

* tag 'csky-for-linus-5.4-rc1' of git://github.com/c-sky/csky-linux:
  csky: Move static keyword to the front of declaration
  csky: entry: Remove unneeded need_resched() loop
  csky: Fixup csky_pmu.max_period assignment
  csky: Fixup add zero_fp fixup perf backtrace panic
  csky: Use generic free_initrd_mem()
  csky: Fixup 610 vipt cache flush mechanism
  csky: Support kernel non-aligned access
  csky: Fixup defer cache flush for 610
  csky: Fixup arch_get_unmapped_area() implementation
  csky: Fixup ioremap function losing
  csky: Optimize arch_sync_dma_for_cpu/device with dma_inv_range
  csky/dma: Fixup cache_op failed when cross memory ZONEs
  csky: Fixup dma_alloc_coherent with PAGE_SO attribute
  csky: Fixup mb() synchronization problem
parents cef0aa0c 9af032a3
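
Note (illustrative, not part of the commit): the alignment.c changes in the first diff below extend the trap handler so that unaligned loads and stores are fixed up in kernel mode as well as in user space ("Support kernel non-aligned access"). The sketch here only shows the kind of access that can end up in csky_alignment() on cores without hardware unaligned-access support; whether a compiler actually emits a trapping word access for it depends on the target and flags.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/*
 * Illustrative only: reading a 32-bit value at an arbitrary byte offset.
 * If this compiles to a plain word load at a misaligned address, the CPU
 * traps and the kernel's alignment handler emulates the access.
 */
static inline uint32_t read_u32_at(const uint8_t *buf, size_t off)
{
	return *(const uint32_t *)(buf + off);	/* may be misaligned */
}

/* The portable way to avoid the trap entirely is a byte-wise copy. */
static inline uint32_t read_u32_safe(const uint8_t *buf, size_t off)
{
	uint32_t v;

	memcpy(&v, buf + off, sizeof(v));	/* no misaligned word access */
	return v;
}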
+45 −17
@@ -5,8 +5,10 @@
#include <linux/uaccess.h>
#include <linux/ptrace.h>

static int align_enable = 1;
static int align_count;
static int align_kern_enable = 1;
static int align_usr_enable = 1;
static int align_kern_count = 0;
static int align_usr_count = 0;

static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
{
@@ -32,9 +34,6 @@ static int ldb_asm(uint32_t addr, uint32_t *valp)
	uint32_t val;
	int err;

	if (!access_ok((void *)addr, 1))
		return 1;

	asm volatile (
		"movi	%0, 0\n"
		"1:\n"
@@ -67,9 +66,6 @@ static int stb_asm(uint32_t addr, uint32_t val)
{
	int err;

	if (!access_ok((void *)addr, 1))
		return 1;

	asm volatile (
		"movi	%0, 0\n"
		"1:\n"
@@ -203,8 +199,6 @@ static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
	if (stb_asm(addr, byte3))
		return 1;

	align_count++;

	return 0;
}

@@ -226,7 +220,14 @@ void csky_alignment(struct pt_regs *regs)
	uint32_t addr   = 0;

	if (!user_mode(regs))
		goto kernel_area;

	if (!align_usr_enable) {
		pr_err("%s user disabled.\n", __func__);
		goto bad_area;
	}

	align_usr_count++;

	ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
	if (ret) {
@@ -234,6 +235,19 @@ void csky_alignment(struct pt_regs *regs)
		goto bad_area;
	}

	goto good_area;

kernel_area:
	if (!align_kern_enable) {
		pr_err("%s kernel disabled.\n", __func__);
		goto bad_area;
	}

	align_kern_count++;

	tmp = *(uint16_t *)instruction_pointer(regs);

good_area:
	opcode = (uint32_t)tmp;

	rx  = opcode & 0xf;
@@ -286,18 +300,32 @@ void csky_alignment(struct pt_regs *regs)
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
}

static struct ctl_table alignment_tbl[4] = {
static struct ctl_table alignment_tbl[5] = {
	{
		.procname = "kernel_enable",
		.data = &align_kern_enable,
		.maxlen = sizeof(align_kern_enable),
		.mode = 0666,
		.proc_handler = &proc_dointvec
	},
	{
		.procname = "user_enable",
		.data = &align_usr_enable,
		.maxlen = sizeof(align_usr_enable),
		.mode = 0666,
		.proc_handler = &proc_dointvec
	},
	{
		.procname = "enable",
		.data = &align_enable,
		.maxlen = sizeof(align_enable),
		.procname = "kernel_count",
		.data = &align_kern_count,
		.maxlen = sizeof(align_kern_count),
		.mode = 0666,
		.proc_handler = &proc_dointvec
	},
	{
		.procname = "count",
		.data = &align_count,
		.maxlen = sizeof(align_count),
		.procname = "user_count",
		.data = &align_usr_count,
		.maxlen = sizeof(align_usr_count),
		.mode = 0666,
		.proc_handler = &proc_dointvec
	},
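
Note (illustrative, not part of the commit): the hunk above splits the old enable/count sysctl pair into separate kernel and user knobs (kernel_enable, user_enable, kernel_count, user_count). A minimal user-space sketch of poking those knobs follows; the /proc/sys/csky/csky_alignment/ path is an assumption based on the table registration code, which is not shown in this diff.

#include <stdio.h>

/* Assumed procfs location; the registration code is outside this hunk. */
#define ALIGN_SYSCTL_DIR "/proc/sys/csky/csky_alignment/"

static long read_knob(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), ALIGN_SYSCTL_DIR "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int write_knob(const char *name, long val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), ALIGN_SYSCTL_DIR "%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	/* Disable user-space fixups (faulting programs then get SIGBUS)... */
	write_knob("user_enable", 0);

	/* ...but keep kernel fixups, and watch how often they fire. */
	printf("kernel fixups so far: %ld\n", read_knob("kernel_count"));
	return 0;
}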
+47 −23
@@ -11,42 +11,66 @@
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

#define PG_dcache_clean		PG_arch_1

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;
	struct address_space *mapping;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_arch_1, &(page)->flags);
		return;
	}

	if (page == ZERO_PAGE(0))
		return;

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	dcache_wb_range(addr, addr + PAGE_SIZE);
	mapping = page_mapping_file(page);

	if (mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		dcache_wbinv_all();
		if (mapping)
			icache_inv_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long addr;
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;
	unsigned long pfn;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	addr = (unsigned long) page_address(page);
	if (page == ZERO_PAGE(0))
		return;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		dcache_wbinv_all();

	if (vma->vm_flags & VM_EXEC ||
	    pages_do_alias(addr, address & PAGE_MASK))
		cache_wbinv_all();

	clear_bit(PG_arch_1, &(page)->flags);

	if (page_mapping_file(page)) {
		if (vma->vm_flags & VM_EXEC)
			icache_inv_all();
	}
}

void flush_kernel_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping_file(page);

	if (!mapping || mapping_mapped(mapping))
		dcache_wbinv_all();
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	dcache_wbinv_all();

	if (vma->vm_flags & VM_EXEC)
		icache_inv_all();
}
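
Note (illustrative, not part of the commit): the cacheflush.c rewrite above implements the usual deferred ("lazy") D-cache flush. flush_dcache_page() on a not-yet-mapped page-cache page only clears PG_dcache_clean, and the real flush is postponed until update_mmu_cache() installs a user PTE for the page. A toy user-space model of that protocol, with invented names, just to make the control flow explicit; it is not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool dcache_clean;	/* models PG_dcache_clean             */
	int  mapcount;		/* models page_mapcount()             */
	bool file_backed;	/* models page_mapping_file() != NULL */
};

static void dcache_wbinv_all(void) { puts("  dcache_wbinv_all()"); }
static void icache_inv_all(void)   { puts("  icache_inv_all()"); }

/* Kernel dirtied the page through its kernel mapping. */
static void toy_flush_dcache_page(struct toy_page *p)
{
	if (p->file_backed && p->mapcount == 0) {
		/* Not user-mapped yet: defer, just mark the cache stale. */
		p->dcache_clean = false;
		return;
	}
	dcache_wbinv_all();
	if (p->file_backed)
		icache_inv_all();
	p->dcache_clean = true;
}

/* A PTE for the page is being installed in a user address space. */
static void toy_update_mmu_cache(struct toy_page *p, bool vm_exec)
{
	if (!p->dcache_clean) {
		p->dcache_clean = true;
		dcache_wbinv_all();	/* the deferred flush happens here */
	}
	if (p->file_backed && vm_exec)
		icache_inv_all();
	p->mapcount++;
}

int main(void)
{
	struct toy_page p = { .dcache_clean = true, .file_backed = true };

	puts("write via kernel mapping (flush deferred):");
	toy_flush_dcache_page(&p);
	puts("first user mapping (deferred flush performed):");
	toy_update_mmu_cache(&p, true);
	puts("write again while user-mapped (flushed immediately):");
	toy_flush_dcache_page(&p);
	return 0;
}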
+31 −14
@@ -4,46 +4,63 @@
#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/compiler.h>
#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_cache_mm(mm)			cache_wbinv_all()
#define flush_cache_mm(mm)			dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}

/*
 * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
 * Use cache_wbinv_all() here and need to be improved in future.
 */
#define flush_cache_range(vma, start, end)	cache_wbinv_all()
#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vunmap(start, end)		cache_wbinv_all()

#define flush_icache_page(vma, page)		cache_wbinv_all()
#define flush_icache_page(vma, page)		do {} while (0);
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

#define flush_icache_user_range(vma, pg, adr, len) \
				cache_wbinv_range(adr, adr + len)
#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	cache_wbinv_all(); \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	cache_wbinv_all(); \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#define flush_dcache_mmap_lock(mapping)		do {} while (0)
#define flush_dcache_mmap_unlock(mapping)	do {} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
+3 −2
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

extern unsigned long shm_align_mask;
#include <asm/shmparam.h>

extern void flush_dcache_page(struct page *page);

static inline unsigned long pages_do_alias(unsigned long addr1,
					   unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
	return (addr1 ^ addr2) & (SHMLBA-1);
}

static inline void clear_user_page(void *addr, unsigned long vaddr,
+40 −35
@@ -9,58 +9,63 @@
#include <linux/random.h>
#include <linux/io.h>

unsigned long shm_align_mask = (0x4000 >> 1) - 1;   /* Sane caches */

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr) + shm_align_mask) & ~shm_align_mask) + \
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;
	int do_color_align;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
			((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vmm = find_vma(current->mm, addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vmm || addr + len <= vmm->vm_start))
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	addr = TASK_UNMAPPED_BASE;
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
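
Note (illustrative, not part of the commit): the rewritten arch_get_unmapped_area() above enforces "colour" alignment, so that a shared file mapping is placed at a virtual address whose offset within an SHMLBA-sized window matches the file offset, and two mappings of the same page can never land in different VIPT cache colours. A small stand-alone sketch of the arithmetic follows; SHMLBA is assumed to be 8 KiB here (matching the old shm_align_mask of 0x1fff), while the real value comes from asm/shmparam.h.

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHMLBA		0x2000UL	/* assumed colour size: 8 KiB */

/* Same expression as the COLOUR_ALIGN() macro in the hunk above. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + SHMLBA - 1) & ~(SHMLBA - 1)) +
	       ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

/* Same check as pages_do_alias() in the abiv1 page.h hunk: two virtual
 * addresses can alias in a VIPT cache only if they differ in the colour
 * bits below SHMLBA. */
static unsigned long pages_do_alias(unsigned long a1, unsigned long a2)
{
	return (a1 ^ a2) & (SHMLBA - 1);
}

int main(void)
{
	unsigned long hint  = 0x20001234UL;	/* caller's address hint   */
	unsigned long pgoff = 3;		/* page offset in the file */
	unsigned long addr  = colour_align(hint, pgoff);

	/* 0x20001234 rounds up to 0x20002000, and pgoff 3 selects the
	 * 0x1000 slot inside the 8 KiB window, giving 0x20003000. */
	printf("aligned address: %#lx\n", addr);

	/* The chosen address has the same colour as the file offset, so
	 * correctly aligned mappings of this page share a cache colour. */
	printf("aliases file offset? %s\n",
	       pages_do_alias(addr, pgoff << PAGE_SHIFT) ? "yes" : "no");
	return 0;
}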