Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4368902b authored by Gilad Ben-Yossef's avatar Gilad Ben-Yossef Committed by Vineet Gupta
Browse files

ARC: Add support for ioremap_prot API



Implement ioremap_prot() to allow mapping IO memory with variable
protection via the TLB.

Implementing this allows the /dev/mem driver to use its generic access()
VMA callback, which in turn allows ptrace to examine data in regions
mapped via /dev/mem, such as the ARC DCCM.

The end result is that it is possible to examine values of variables
placed into DCCM in user space programs via GDB.

CC: Alexey Brodkin <Alexey.Brodkin@synopsys.com>
CC: Noam Camus <noamc@ezchip.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 8c2f4a8d
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -25,6 +25,7 @@ config ARC
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_TRACEHOOK
	select HAVE_ARCH_TRACEHOOK
	select HAVE_GENERIC_HARDIRQS
	select HAVE_GENERIC_HARDIRQS
	select HAVE_IOREMAP_PROT
	select HAVE_IRQ_WORK
	select HAVE_IRQ_WORK
	select HAVE_KPROBES
	select HAVE_KPROBES
	select HAVE_KRETPROBES
	select HAVE_KRETPROBES
+2 −0
Original line number Original line Diff line number Diff line
@@ -16,6 +16,8 @@
#define PCI_IOBASE ((void __iomem *)0)
#define PCI_IOBASE ((void __iomem *)0)


extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
				  unsigned long flags);
extern void iounmap(const void __iomem *addr);
extern void iounmap(const void __iomem *addr);


#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
+3 −0
Original line number Original line Diff line number Diff line
@@ -48,6 +48,8 @@ typedef unsigned long pgtable_t;
#define __pgd(x)        ((pgd_t) { (x) })
#define __pgd(x)        ((pgd_t) { (x) })
#define __pgprot(x)     ((pgprot_t) { (x) })
#define __pgprot(x)     ((pgprot_t) { (x) })


#define pte_pgprot(x) __pgprot(pte_val(x))

#else /* !STRICT_MM_TYPECHECKS */
#else /* !STRICT_MM_TYPECHECKS */


typedef unsigned long pte_t;
typedef unsigned long pte_t;
@@ -60,6 +62,7 @@ typedef unsigned long pgtable_t;
#define pgprot_val(x)	(x)
#define pgprot_val(x)	(x)
#define __pte(x)	(x)
#define __pte(x)	(x)
#define __pgprot(x)	(x)
#define __pgprot(x)	(x)
#define pte_pgprot(x)	(x)


#endif
#endif


+36 −12
Original line number Original line Diff line number Diff line
@@ -16,25 +16,49 @@


void __iomem *ioremap(unsigned long paddr, unsigned long size)
void __iomem *ioremap(unsigned long paddr, unsigned long size)
{
{
	unsigned long vaddr;
	unsigned long end;
	struct vm_struct *area;
	unsigned long off, end;
	const pgprot_t prot = PAGE_KERNEL_NO_CACHE;


	/* Don't allow wraparound or zero size */
	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	end = paddr + size - 1;
	if (!size || (end < paddr))
	if (!size || (end < paddr))
		return NULL;
		return NULL;


	/* If the region is h/w uncached, nothing special needed */
	/* If the region is h/w uncached, avoid MMU mappings */
	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
		return (void __iomem *)paddr;
		return (void __iomem *)paddr;


	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

/*
 * ioremap with access flags
 * Cache semantics wise it is same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses in
 * ARC hardware uncached region, this one still goes thru the MMU as caller
 * might need finer access control (R/W/X)
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
{
	void __iomem *vaddr;
	struct vm_struct *area;
	unsigned long off, end;
	pgprot_t prot = __pgprot(flags);

	/* Don't allow wraparound, zero size */
	end = paddr + size - 1;
	if ((!size) || (end < paddr))
		return NULL;

	/* An early platform driver might end up here */
	/* An early platform driver might end up here */
	if (!slab_is_available())
	if (!slab_is_available())
		return NULL;
		return NULL;


	/* Mappings have to be page-aligned, page-sized */
	/* force uncached */
	prot = pgprot_noncached(prot);

	/* Mappings have to be page-aligned */
	off = paddr & ~PAGE_MASK;
	off = paddr & ~PAGE_MASK;
	paddr &= PAGE_MASK;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - paddr;
	size = PAGE_ALIGN(end + 1) - paddr;
@@ -45,17 +69,17 @@ void __iomem *ioremap(unsigned long paddr, unsigned long size)
	area = get_vm_area(size, VM_IOREMAP);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
	if (!area)
		return NULL;
		return NULL;

	area->phys_addr = paddr;
	area->phys_addr = paddr;
	vaddr = (unsigned long)area->addr;
	vaddr = (void __iomem *)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
	if (ioremap_page_range((unsigned long)vaddr,
		vfree(area->addr);
			       (unsigned long)vaddr + size, paddr, prot)) {
		vunmap((void __force *)vaddr);
		return NULL;
		return NULL;
	}
	}

	return (void __iomem *)(off + (char __iomem *)vaddr);
	return (void __iomem *)(off + (char __iomem *)vaddr);
}
}
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_prot);



void iounmap(const void __iomem *addr)
void iounmap(const void __iomem *addr)
{
{