Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 11ab3f3d authored by Chen Liqin's avatar Chen Liqin
Browse files

score: add flush_dcache_page and PG_dcache_dirty define



Signed-off-by: Cui Bixiong <bixiong@sunnorth.com.cn>
Signed-off-by: Chen Liqin <liqin.chen@sunplusct.com>

	modified:   arch/score/include/asm/cacheflush.h
	modified:   arch/score/mm/cache.c
parent 718deb6b
Loading
Loading
Loading
Loading
+3 −1
Original line number Original line Diff line number Diff line
@@ -14,10 +14,12 @@ extern void flush_cache_sigtramp(unsigned long addr);
extern void flush_icache_all(void);
extern void flush_icache_all(void);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);

#define PG_dcache_dirty         PG_arch_1


#define flush_cache_dup_mm(mm)			do {} while (0)
#define flush_cache_dup_mm(mm)			do {} while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do {} while (0)
#define flush_dcache_mmap_lock(mapping)		do {} while (0)
#define flush_dcache_mmap_lock(mapping)		do {} while (0)
#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
#define flush_cache_vmap(start, end)		do {} while (0)
#define flush_cache_vmap(start, end)		do {} while (0)
+24 −2
Original line number Original line Diff line number Diff line
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include <linux/fs.h>


#include <asm/mmu_context.h>
#include <asm/mmu_context.h>


@@ -51,6 +52,27 @@ static void flush_data_cache_page(unsigned long addr)
	}
	}
}
}


/*
 * flush_dcache_page() - keep the D-cache coherent with a page-cache page.
 * @page: the page whose cached data may be stale or dirty in the D-cache.
 *
 * If the page belongs to a mapping that is not currently mapped into any
 * user address space, the flush is deferred: the page is only tagged with
 * PG_dcache_dirty (see the PG_dcache_dirty -> PG_arch_1 define in
 * cacheflush.h); __update_cache() below tests and clears that bit when the
 * page is eventually faulted in.  Otherwise the page is flushed immediately
 * via its kernel virtual address.
 *
 * NOTE(review): highmem pages are skipped outright here — presumably because
 * page_address() would not give a usable kernel mapping for them; confirm
 * whether this arch can actually have highmem pages reaching this path.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	/* No user mappings yet: defer the flush by marking the page dirty. */
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

/* called by update_mmu_cache. */
/* called by update_mmu_cache. */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
void __update_cache(struct vm_area_struct *vma, unsigned long address,
		pte_t pte)
		pte_t pte)
@@ -63,11 +85,11 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
	if (unlikely(!pfn_valid(pfn)))
	if (unlikely(!pfn_valid(pfn)))
		return;
		return;
	page = pfn_to_page(pfn);
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
		addr = (unsigned long) page_address(page);
		addr = (unsigned long) page_address(page);
		if (exec)
		if (exec)
			flush_data_cache_page(addr);
			flush_data_cache_page(addr);
		clear_bit(PG_arch_1, &page->flags);
		clear_bit(PG_dcache_dirty, &(page)->flags);
	}
	}
}
}