Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0c64a4fc authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "ARM: msm: add 64-bit IO accessor support"

parents b5c0a039 30216207
Loading
Loading
Loading
Loading
+122 −18
Original line number Diff line number Diff line
@@ -28,6 +28,7 @@
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
#include <linux/msm_rtb.h>
#include <xen/xen.h>

/*
@@ -61,23 +62,24 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
 * the bus. Rather than special-case the machine, just let the compiler
 * generate the access for CPUs prior to ARMv6.
 */
/*
 * Pre-ARMv6 path: let the compiler generate the halfword access directly
 * (see the comment above).  __chk_io_ptr() is a sparse-only address-space
 * check; it compiles to nothing.
 */
#define __raw_readw(a)         (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
#define __raw_writew(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
/* _no_log variants: identical accesses, provided so callers that bypass the
 * RTB logging wrappers below still have a halfword accessor to use. */
#define __raw_readw_no_log(a)		(__chk_io_ptr(a), \
					*(volatile unsigned short __force *)(a))
#define __raw_writew_no_log(v, a)      ((void)(__chk_io_ptr(a), \
					*(volatile unsigned short __force *)\
					(a) = (v)))
#else
/*
 * When running under a hypervisor, we want to avoid I/O accesses with
 * writeback addressing modes as these incur a significant performance
 * overhead (the address generation must be emulated in software).
 */
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
/*
 * Non-logged 16-bit MMIO write.  The "Q" constraint restricts the asm to a
 * single-register addressing mode (no writeback), which per the comment
 * above avoids costly address-generation emulation under a hypervisor.
 */
static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
{
	asm volatile("strh %1, %0"
		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
{
	u16 val;
	asm volatile("ldrh %0, %1"
@@ -87,22 +89,29 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
}
#endif

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
/*
 * Non-logged 8-bit MMIO write.  "Qo" permits only offset addressing modes,
 * again avoiding writeback forms that are slow to emulate under a
 * hypervisor (see the comment above this function group).
 */
static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
{
	asm volatile("strb %1, %0"
		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
/* Non-logged 32-bit MMIO write; same addressing-mode rationale as above. */
static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
{
	asm volatile("str %1, %0"
		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
/*
 * Non-logged 64-bit MMIO write.
 *
 * On ARM, strd stores from an even/odd register pair, so the value is
 * forced into r2 (making the pair r2/r3) via the explicit register
 * variable.  The asm must therefore consume 'v', not 'val': passing
 * 'val' would let the compiler pick an arbitrary (possibly odd or
 * unpaired) register and leave 'v' dead, defeating the pairing.
 */
static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
{
	register u64 v asm ("r2");

	v = val;

	asm volatile("strd %1, %0"
		     : : "Qo" (*(volatile u64 __force *)addr), "r" (v));
}

static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
{
	u8 val;
	asm volatile("ldrb %0, %1"
@@ -111,8 +120,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
	return val;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
{
	u32 val;
	asm volatile("ldr %0, %1"
@@ -121,6 +129,58 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
	return val;
}

/*
 * Non-logged 64-bit MMIO read.  ldrd loads into an even/odd register
 * pair, so the result is pinned to r2 (pair r2/r3) with an explicit
 * register variable.  The "+Qo" memory operand both restricts the
 * addressing mode and tells the compiler the location is read.
 */
static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
{
	register u64 val asm ("r2");

	asm volatile("ldrd %1, %0"
		     : "+Qo" (*(volatile u64 __force *)addr),
		       "=r" (val));
	return val;
}

/*
 * There may be cases when clients don't want to support or can't support the
 * logging. The appropriate functions can be used but clients should carefully
 * consider why they can't support the logging.
 */

/*
 * Logged write: records the target address in the uncached RTB log
 * (uncached_logk) and drops an ETB waypoint before performing the
 * non-logged access, then issues a barrier if the log entry was taken.
 * _t is the size suffix (b/w/l/q) pasted onto __raw_write*_no_log.
 * NOTE(review): semantics of uncached_logk/ETB_WAYPOINT/LOG_BARRIER come
 * from <linux/msm_rtb.h>, not visible here.
 */
#define __raw_write_logged(v, a, _t)	({ \
	int _ret; \
	volatile void __iomem *_a = (a); \
	void *_addr = (void __force *)(_a); \
	_ret = uncached_logk(LOGK_WRITEL, _addr); \
	ETB_WAYPOINT; \
	__raw_write##_t##_no_log((v), _a); \
	if (_ret) \
		LOG_BARRIER; \
	})


/* Default raw writers are the logged variants. */
#define __raw_writeb(v, a)	__raw_write_logged((v), (a), b)
#define __raw_writew(v, a)	__raw_write_logged((v), (a), w)
#define __raw_writel(v, a)	__raw_write_logged((v), (a), l)
#define __raw_writeq(v, a)	__raw_write_logged((v), (a), q)

/*
 * Logged read: mirror of __raw_write_logged for loads.  _l is the size
 * suffix (b/w/l/q) for the _no_log accessor; _t is the C type spelling
 * ("unsigned _t" => unsigned char/short/int/long long) used for the
 * temporary that becomes the statement-expression's value.
 */
#define __raw_read_logged(a, _l, _t)		({ \
	unsigned _t __a; \
	const volatile void __iomem *_a = (a); \
	void *_addr = (void __force *)(_a); \
	int _ret; \
	_ret = uncached_logk(LOGK_READL, _addr); \
	ETB_WAYPOINT; \
	__a = __raw_read##_l##_no_log(_a);\
	if (_ret) \
		LOG_BARRIER; \
	__a; \
	})


/* Default raw readers are the logged variants. */
#define __raw_readb(a)		__raw_read_logged((a), b, char)
#define __raw_readw(a)		__raw_read_logged((a), w, short)
#define __raw_readl(a)		__raw_read_logged((a), l, int)
#define __raw_readq(a)		__raw_read_logged((a), q, long long)

/*
 * Architecture ioremap implementation.
 */
@@ -300,18 +360,36 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
					__raw_readw(c)); __r; })
/* Relaxed (no barrier) reads: raw access plus little-endian conversion.
 * The _no_log forms bypass the RTB logging in __raw_read*(). */
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })
#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
					__raw_readq(c)); __r; })
#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl_no_log(c)); __r; })
#define readq_relaxed_no_log(c) ({ u64 __r = le64_to_cpu((__force __le64) \
					__raw_readq_no_log(c)); __r; })


/* Relaxed (no barrier) writes: little-endian conversion plus raw access.
 * The _no_log forms bypass the RTB logging in __raw_write*(). */
#define writeb_relaxed(v, c)	__raw_writeb(v, c)
#define writew_relaxed(v, c)	__raw_writew((__force u16) cpu_to_le16(v), c)
#define writel_relaxed(v, c)	__raw_writel((__force u32) cpu_to_le32(v), c)
#define writeq_relaxed(v, c)    __raw_writeq((__force u64) cpu_to_le64(v), c)
#define writeb_relaxed_no_log(v, c)    ((void)__raw_writeb_no_log((v), (c)))
#define writew_relaxed_no_log(v, c) __raw_writew_no_log((__force u16) \
					cpu_to_le16(v), c)
#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) \
					cpu_to_le32(v), c)
#define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) \
					cpu_to_le64(v), c)

/* Ordered MMIO accessors: relaxed access plus the read/write barrier. */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
/*
 * Reformatted to match the readb/readw/readl macros above (the original
 * split the statement with a stray line continuation and a leading ';');
 * expansion is unchanged.
 */
#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(); __v; })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
#define writeq(v, c)		({ __iowmb(); writeq_relaxed(v, c); })

/* Repeated ("string") MMIO accessors. */
#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
@@ -419,6 +497,31 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);

void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
/*
 * io{read,write}{8,16,32,64} macros
 */
#ifndef ioread8
#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)\
				__raw_readw(p)); __iormb(); __v; })
#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)\
				__raw_readl(p)); __iormb(); __v; })
/*
 * The 64-bit results must be held in a u64: storing them in
 * 'unsigned int' (as the 8/16/32-bit variants do) would silently
 * truncate the upper 32 bits of the value read from the device.
 * This matches readq()/readq_relaxed() above.
 */
#define ioread64(p)	({ u64 __v = le64_to_cpu((__force __le64)\
				__raw_readq(p)); __iormb(); __v; })

#define ioread64be(p)	({ u64 __v = be64_to_cpu((__force __be64)\
				__raw_readq(p)); __iormb(); __v; })

#define iowrite8(v, p)	({ __iowmb(); __raw_writeb(v, p); })
#define iowrite16(v, p)	({ __iowmb(); __raw_writew((__force __u16)\
				cpu_to_le16(v), p); })
#define iowrite32(v, p)	({ __iowmb(); __raw_writel((__force __u32)\
				cpu_to_le32(v), p); })
#define iowrite64(v, p)	({ __iowmb(); __raw_writeq((__force __u64)\
				cpu_to_le64(v), p); })

#define iowrite64be(v, p) ({ __iowmb(); __raw_writeq((__force __u64)\
				cpu_to_be64(v), p); })

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
#define arch_memremap_wb arch_memremap_wb
@@ -440,6 +543,7 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif
#endif

struct pci_dev;