
Commit d3251005 authored by Tejun Heo

x86: convert cacheflush macros to inline functions



Impact: cleanup

Unused macro parameters cause spurious unused variable warnings.
Convert all cacheflush macros to inline functions to avoid the
warnings and achieve better type checking.
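For illustration, a minimal stand-alone sketch of the problem the patch fixes; it is not part of the patch, and the names flush_cache_mm_old/flush_cache_mm_new, demo() and demo.c are hypothetical:

/* Hypothetical demo, not from the patch: compile with
 * "gcc -Wall -c demo.c" to see the difference. */
struct mm_struct;

/* Old style: the argument is swallowed unexpanded, so the expansion
 * never references it and no type checking takes place. */
#define flush_cache_mm_old(mm)		do { } while (0)

/* New style: the parameter is a real use with a real type. */
static inline void flush_cache_mm_new(struct mm_struct *mm) { }

void demo(struct mm_struct *mm)
{
	struct mm_struct *tmp = mm;	/* exists only for the calls below */

	flush_cache_mm_old(tmp);	/* gcc: warning, 'tmp' unused */
	flush_cache_mm_new(tmp);	/* no warning: tmp is consumed */
	/* flush_cache_mm_old(42);	   silently accepted */
	/* flush_cache_mm_new(42);	   diagnosed: int passed where
					   struct mm_struct * expected */
}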

Signed-off-by: Tejun Heo <tj@kernel.org>
parent 24ff9542
+36 −17
@@ -5,24 +5,43 @@
 #include <linux/mm.h>
 
 /* Caches aren't brain-dead on the intel. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma, pg)		do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
+static inline void flush_cache_all(void) { }
+static inline void flush_cache_mm(struct mm_struct *mm) { }
+static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end) { }
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr, unsigned long pfn) { }
+static inline void flush_dcache_page(struct page *page) { }
+static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
+static inline void flush_icache_range(unsigned long start,
+				      unsigned long end) { }
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page) { }
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+					   struct page *page,
+					   unsigned long addr,
+					   unsigned long len) { }
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
+static inline void flush_cache_vunmap(unsigned long start,
+				      unsigned long end) { }
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
-	memcpy((dst), (src), (len))
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
-	memcpy((dst), (src), (len))
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+				     struct page *page, unsigned long vaddr,
+				     void *dst, const void *src,
+				     unsigned long len)
+{
+	memcpy(dst, src, len);
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+				       struct page *page, unsigned long vaddr,
+				       void *dst, const void *src,
+				       unsigned long len)
+{
+	memcpy(dst, src, len);
+}
 
 #define PG_non_WB				PG_arch_1
 PAGEFLAG(NonWB, non_WB)