Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0ea90b9e authored by Linus Torvalds
Browse files

Merge tag 'microblaze-3.19-rc1' of git://git.monstr.eu/linux-2.6-microblaze

Pull Microblaze fix from Michal Simek:
 "Fix mmap for cache coherent memory"

* tag 'microblaze-3.19-rc1' of git://git.monstr.eu/linux-2.6-microblaze:
  microblaze: Fix mmap for cache coherent memory
parents 5f5425ef 3a8e3265
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -565,6 +565,7 @@ void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);
	size_t size, int direction);
unsigned long consistent_virt_to_pfn(void *vaddr);


void setup_memory(void);
void setup_memory(void);
#endif /* __ASSEMBLY__ */
#endif /* __ASSEMBLY__ */
+27 −0
Original line number Original line Diff line number Diff line
@@ -154,9 +154,36 @@ dma_direct_sync_sg_for_device(struct device *dev,
			__dma_sync(sg->dma_address, sg->length, direction);
			__dma_sync(sg->dma_address, sg->length, direction);
}
}


/*
 * dma_direct_mmap_coherent - map a DMA-coherent allocation into user space.
 *
 * @dev:      device the buffer was allocated for (unused here)
 * @vma:      user VMA describing the requested mapping window
 * @cpu_addr: kernel virtual address returned by the coherent allocator
 * @handle:   DMA (bus) address of the buffer (unused here)
 * @size:     size of the underlying allocation in bytes
 * @attrs:    DMA attributes (unused here)
 *
 * Returns 0 on success, -ENXIO when the requested window falls outside
 * the allocation or the kernel has no MMU.
 */
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pgoff = vma->vm_pgoff;
	unsigned long pfn;

	/* Reject windows that start past, or run beyond, the allocation. */
	if (pgoff >= nr_pages || nr_vma_pages > (nr_pages - pgoff))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	/*
	 * Non cache-coherent hardware: the user mapping must be uncached,
	 * and the kernel vaddr comes from the consistent pool, so resolve
	 * its PFN through the page tables rather than virt_to_pfn().
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}

struct dma_map_ops dma_direct_ops = {
struct dma_map_ops dma_direct_ops = {
	.alloc		= dma_direct_alloc_coherent,
	.alloc		= dma_direct_alloc_coherent,
	.free		= dma_direct_free_coherent,
	.free		= dma_direct_free_coherent,
	.mmap		= dma_direct_mmap_coherent,
	.map_sg		= dma_direct_map_sg,
	.map_sg		= dma_direct_map_sg,
	.dma_supported	= dma_direct_dma_supported,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.map_page	= dma_direct_map_page,
+20 −5
Original line number Original line Diff line number Diff line
@@ -156,6 +156,25 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
}
}
EXPORT_SYMBOL(consistent_alloc);
EXPORT_SYMBOL(consistent_alloc);


#ifdef CONFIG_MMU
/* Walk the kernel page tables and return the PTE backing @vaddr. */
static pte_t *consistent_virt_to_pte(void *vaddr)
{
	unsigned long va = (unsigned long)vaddr;

	return pte_offset_kernel(pmd_offset(pgd_offset_k(va), va), va);
}

/*
 * Translate a consistent-pool kernel virtual address to its page frame
 * number. Returns 0 when no valid mapping exists for @vaddr.
 */
unsigned long consistent_virt_to_pfn(void *vaddr)
{
	pte_t *ptep = consistent_virt_to_pte(vaddr);

	if (!pte_none(*ptep) && pte_present(*ptep))
		return pte_pfn(*ptep);

	return 0;
}
#endif

/*
/*
 * free page(s) as defined by the above mapping.
 * free page(s) as defined by the above mapping.
 */
 */
@@ -181,13 +200,9 @@ void consistent_free(size_t size, void *vaddr)
	} while (size -= PAGE_SIZE);
	} while (size -= PAGE_SIZE);
#else
#else
	do {
	do {
		pte_t *ptep;
		pte_t *ptep = consistent_virt_to_pte(vaddr);
		unsigned long pfn;
		unsigned long pfn;


		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
				(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);