
Commit 10c9c10c authored by Guan Xuetao

unicore32 core architecture: mm related: consistent device DMA handling



This patch implements consistent device DMA handling for the memory
management code. The device DMA operations are included here as well.

Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
parent 56372b0b
arch/unicore32/include/asm/cacheflush.h  +211 −0
/*
 * linux/arch/unicore32/include/asm/cacheflush.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_CACHEFLUSH_H__
#define __UNICORE_CACHEFLUSH_H__

#include <linux/mm.h>

#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/unicore32/mm/cache.S file implements these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and their required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		See __flush_icache_all below for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
extern void __cpuc_dma_flush_range(unsigned long, unsigned long);

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

/*
 * Convert calls to our calling convention.
 */
/* Invalidate I-cache */
static inline void __flush_icache_all(void)
{
	asm("movc	p0.c5, %0, #20;\n"
	    "nop; nop; nop; nop; nop; nop; nop; nop\n"
	    :
	    : "r" (0));
}

#define flush_cache_all()		__cpuc_flush_kern_all()

extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma,
		unsigned long user_addr, unsigned long pfn);

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the UniCore private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma, start, end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s, e)	__cpuc_coherent_kern_range(s, e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start, size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping)			\
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping)		\
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma, page, addr, len)	\
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

#endif
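
As a usage sketch of the I/D-coherency contract documented above: a caller
that writes instructions into kernel memory must make them visible to the
instruction stream before executing them. The example_* names and the buffer
are hypothetical, for illustration only; only flush_icache_range() comes from
this header.

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Hypothetical caller: commit freshly written instructions to memory. */
static void example_commit_code(void *code_buf, size_t len)
{
	unsigned long start = (unsigned long)code_buf;

	/*
	 * flush_icache_range() maps to __cpuc_coherent_kern_range():
	 * write back the D-cache and invalidate the I-cache over the
	 * range, so the CPU fetches the new instructions.
	 */
	flush_icache_range(start, start + len);
}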
arch/unicore32/include/asm/dma-mapping.h  +124 −0
/*
 * linux/arch/unicore32/include/asm/dma-mapping.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include <asm-generic/dma-coherent.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>

extern struct dma_map_ops swiotlb_dma_map_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &swiotlb_dma_map_ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	return dma_ops->dma_supported(dev, mask);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dev, dma_addr);

	return 0;
}

#include <asm-generic/dma-mapping-common.h>

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (dev && dev->dma_mask)
		return addr + size - 1 <= *dev->dma_mask;

	return true;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

static inline void dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		__cpuc_dma_flush_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__cpuc_dma_clean_range(start, end);
		break;
	}
}

#endif /* __KERNEL__ */
#endif
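
A short sketch of the two paths this header offers a driver:
dma_alloc_coherent() for a buffer that needs no explicit maintenance, and
dma_cache_sync() when streaming a CPU-written buffer to the device. All
example_* names are assumptions for illustration.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical: allocate a coherent descriptor ring for a device. */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	/* Routed through swiotlb_dma_map_ops->alloc_coherent() above. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

/* Hypothetical: hand a CPU-written buffer to the device. */
static void example_sync_to_device(struct device *dev, void *buf, size_t len)
{
	/* DMA_TO_DEVICE selects __cpuc_dma_clean_range(): writeback only. */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}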
arch/unicore32/include/asm/dma.h  +23 −0
/*
 * linux/arch/unicore32/include/asm/dma.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __UNICORE_DMA_H__
#define __UNICORE_DMA_H__

#include <asm/memory.h>
#include <asm-generic/dma.h>

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#endif

#endif /* __UNICORE_DMA_H__ */
arch/unicore32/include/asm/tlbflush.h  +195 −0
/*
 * linux/arch/unicore32/include/asm/tlbflush.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_TLBFLUSH_H__
#define __UNICORE_TLBFLUSH_H__

#ifndef __ASSEMBLY__

#include <linux/sched.h>

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long,
					struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

/*
 *	TLB Management
 *	==============
 *
 *	The arch/unicore32/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(mm,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- mm	- mm_struct describing address space
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr - virtual address (may not be aligned)
 *		- vma	- vm_area_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr - Kernel virtual memory address
 */

static inline void local_flush_tlb_all(void)
{
	const int zero = 0;

	/* TLB invalidate all */
	asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (zero) : "cc");
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;

	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
		/* TLB invalidate all */
		asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (zero) : "cc");
	}
	put_cpu();
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
		/* iTLB invalidate page */
		asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
		/* dTLB invalidate page */
		asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
#else
		/* TLB invalidate all */
		asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
#endif
	}
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
	/* iTLB invalidate page */
	asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
	/* dTLB invalidate page */
	asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
#else
	/* TLB invalidate all */
	asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
#endif
}

/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(pmd_t *pmd)
{
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	/* flush dcache line, see dcacheline_flush in proc-macros.S */
	asm("mov	r1, %0 << #20\n"
		"ldw	r2, =_stext\n"
		"add	r2, r2, r1 >> #20\n"
		"ldw	r1, [r2+], #0x0000\n"
		"ldw	r1, [r2+], #0x1000\n"
		"ldw	r1, [r2+], #0x2000\n"
		"ldw	r1, [r2+], #0x3000\n"
		: : "r" (pmd) : "r1", "r2");
#else
	/* flush dcache all */
	asm("movc p0.c5, %0, #14; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (pmd) : "cc");
#endif
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	/* clean dcache line */
	asm("movc p0.c5, %0, #11; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc");
#else
	/* clean dcache all */
	asm("movc p0.c5, %0, #10; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (pmd) : "cc");
#endif
}

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma, start, end)	\
	__cpu_flush_user_tlb_range(start, end, vma)
#define local_flush_tlb_kernel_range(s, e)	\
	__cpu_flush_kern_tlb_range(s, e)

#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range

/*
 * if PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep);

extern void do_bad_area(unsigned long addr, unsigned int fsr,
		struct pt_regs *regs);

#endif

#endif
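
As a sketch of the contract above: after a PTE changes, the stale TLB entry
for that address must be dropped before the new mapping is relied on.
set_pte_at() is the generic kernel PTE store, assumed available on this
architecture; the example_* wrapper is hypothetical.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical: install a new user PTE and invalidate the old entry. */
static void example_set_pte(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pte_t newpte)
{
	set_pte_at(vma->vm_mm, addr, ptep, newpte);

	/*
	 * flush_tlb_page() maps to local_flush_tlb_page() above, which
	 * invalidates the I- and D-TLB entries for addr on this CPU.
	 */
	flush_tlb_page(vma, addr);
}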
arch/unicore32/include/mach/dma.h  +41 −0
/*
 * linux/arch/unicore32/include/mach/dma.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __MACH_PUV3_DMA_H__
#define __MACH_PUV3_DMA_H__

/*
 * The PKUnity has six internal DMA channels.
 */
#define MAX_DMA_CHANNELS	6

typedef enum {
	DMA_PRIO_HIGH = 0,
	DMA_PRIO_MEDIUM = 1,
	DMA_PRIO_LOW = 2
} puv3_dma_prio;

/*
 * DMA registration
 */

extern int puv3_request_dma(char *name,
			 puv3_dma_prio prio,
			 void (*irq_handler)(int, void *),
			 void (*err_handler)(int, void *),
			 void *data);

extern void puv3_free_dma(int dma_ch);

#define puv3_stop_dma(ch)		(DMAC_CONFIG(ch) &= ~DMAC_CONFIG_EN)
#define puv3_resume_dma(ch)             (DMAC_CONFIG(ch) |= DMAC_CONFIG_EN)

#endif /* __MACH_PUV3_DMA_H__ */
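
A sketch of the registration API above, assuming puv3_request_dma() follows
the usual kernel convention of returning a channel number on success and a
negative errno on failure (the commit does not show the implementation). The
handlers and example_* names are hypothetical.

#include <mach/dma.h>

/* Hypothetical completion/error callbacks for one channel. */
static void example_dma_irq(int ch, void *data)
{
	/* transfer finished on channel ch */
}

static void example_dma_err(int ch, void *data)
{
	/* DMA error on channel ch: report and recover */
}

static int example_claim_channel(void *drvdata)
{
	int ch = puv3_request_dma("example", DMA_PRIO_MEDIUM,
				  example_dma_irq, example_dma_err, drvdata);

	if (ch < 0)
		return ch;		/* no free channel */

	puv3_stop_dma(ch);		/* keep disabled until programmed */
	return ch;
}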