
Commit d76368d4 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "arm: dma-mapping: page align size before flush tlb"

parents abe4fc28 9e6d9840
+11 −0
@@ -62,6 +62,17 @@ config DEBUG_USER
	      8 - SIGSEGV faults
	     16 - SIGBUS faults

config FORCE_PAGES
	bool "Force lowmem to be mapped with 4K pages"
        help
          There are some advanced debug features that can only be done when
          memory is mapped with pages instead of sections. Enable this option
          to always map lowmem pages with pages. This may have a performance
          cost due to increased TLB pressure.

          If unsure say N.


# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
	bool "Kernel low-level debugging functions (read help!)"
+1 −0
@@ -363,6 +363,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define writel_relaxed(v, c)	__raw_writel((__force u32) cpu_to_le32(v), c)
#define writeq_relaxed(v, c)	__raw_writeq((__force u64) cpu_to_le64(v), c)
#define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
#define writew_relaxed_no_log(v, c) __raw_writew_no_log((__force u16) cpu_to_le16(v), c)
#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) cpu_to_le32(v), c)
#define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) cpu_to_le64(v), c)
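
The *_relaxed_no_log macros above wrap the __raw_*_no_log accessors, i.e. relaxed MMIO writes that bypass whatever register-access logging this tree adds to the plain paths. A hypothetical driver fragment showing how such a macro would be called (the register offset is made up):

/* Hypothetical: kick a device doorbell without logging the access. */
static void example_ring_doorbell(void __iomem *base)
{
	writel_relaxed_no_log(0x1, base + 0x10);	/* 0x10: made-up doorbell offset */
}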

+61 −7
@@ -505,6 +505,15 @@ void __init dma_contiguous_remap(void)
		struct map_desc map;
		unsigned long addr;

		/*
		 * Make start and end PMD_SIZE aligned, observing memory
		 * boundaries
		 */
		if (memblock_is_memory(start & PMD_MASK))
			start = start & PMD_MASK;
		if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
			end = ALIGN(end, PMD_SIZE);

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
@@ -525,8 +534,13 @@ void __init dma_contiguous_remap(void)
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
-		     addr += PMD_SIZE)
-			pmd_clear(pmd_off_k(addr));
+		     addr += PMD_SIZE) {
+			pmd_t *pmd;
+
+			pmd = pmd_off_k(addr);
+			if (pmd_bad(*pmd))
+				pmd_clear(pmd);
+		}

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));
@@ -697,9 +711,14 @@ static void __free_from_contiguous(struct device *dev, struct page *page,

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
-	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
-			pgprot_writecombine(prot) :
-			pgprot_dmacoherent(prot);
+	if (attrs & DMA_ATTR_WRITE_COMBINE)
+		prot = pgprot_writecombine(prot);
+	else if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		prot = pgprot_stronglyordered(prot);
+	/* if non-consistent just pass back what was given */
+	else if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+		prot = pgprot_dmacoherent(prot);
+
	return prot;
}
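
For context, __get_dma_pgprot() above is driven by the attrs value a caller hands to the DMA API; with the new branches, DMA_ATTR_STRONGLY_ORDERED (an attribute that appears specific to this tree) and DMA_ATTR_NON_CONSISTENT now pick their own protections instead of falling through to pgprot_dmacoherent(). A hypothetical caller sketch, not taken from this tree:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/* Hypothetical driver allocation: the attrs argument reaches
 * __get_dma_pgprot(), so this takes the DMA_ATTR_STRONGLY_ORDERED branch. */
static void *example_alloc_so(struct device *dev, dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, SZ_4K, handle, GFP_KERNEL,
			       DMA_ATTR_STRONGLY_ORDERED);
}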

@@ -955,6 +974,7 @@ static void arm_dma_unremap(struct device *dev, void *remapped_addr,
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);

	area = find_vm_area(remapped_addr);
@@ -965,6 +985,8 @@ static void arm_dma_unremap(struct device *dev, void *remapped_addr,
	}

	vunmap(remapped_addr);
	flush_tlb_kernel_range((unsigned long)remapped_addr,
			(unsigned long)(remapped_addr + size));
}
/*
 * Create userspace mapping for the DMA-coherent memory.
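
The two arm_dma_unremap() hunks above are the change named in the merge subject: the size is now rounded up with PAGE_ALIGN() to match the already page-masked address, and a flush_tlb_kernel_range() call is added after vunmap(), so the flush always covers whole pages. A standalone sketch of just that arithmetic (user-space C, PAGE_SIZE assumed to be 4 KiB, the macros redefined locally, example addresses made up):

#include <stdio.h>

/* Local stand-ins for the kernel macros, 4 KiB pages assumed. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long remapped_addr = 0xffefd080UL;	/* not page aligned */
	unsigned long size = 0x180UL;

	size = PAGE_ALIGN(size);			/* 0x1000 */
	remapped_addr &= PAGE_MASK;			/* 0xffefd000 */

	/* flush_tlb_kernel_range() is then called on [addr, addr + size). */
	printf("flush 0x%lx - 0x%lx\n", remapped_addr, remapped_addr + size);
	return 0;
}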
@@ -1912,7 +1934,31 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+	struct scatterlist *s;
+	int i;
+	size_t ret;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = 0, current_offset = 0;
+	dma_addr_t iova;
+	int prot = __dma_direction_to_prot(dir);
+
+	for_each_sg(sg, s, nents, i)
+		total_length += s->length;
+
+	iova = __alloc_iova(mapping, total_length);
+	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
+	if (ret != total_length) {
+		__free_iova(mapping, iova, total_length);
+		return 0;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = iova + current_offset;
+		s->dma_length = total_length - current_offset;
+		current_offset += s->length;
+	}
+
+	return nents;
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
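
The rewritten arm_iommu_map_sg() above maps the whole scatterlist into one contiguous IOVA window and then fixes up each entry: dma_address advances by the accumulated offset while dma_length holds the length remaining from that point. A worked example of that bookkeeping only (user-space C with made-up lengths and a made-up IOVA; the real code gets these from __alloc_iova() and the sg entries):

#include <stdio.h>

int main(void)
{
	unsigned long iova = 0x10000000UL;	/* pretend __alloc_iova() result */
	unsigned long lengths[] = { 0x1000, 0x2000, 0x1000 };
	unsigned long total = 0x4000, offset = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		printf("sg[%u]: dma_address=0x%lx dma_length=0x%lx\n",
		       i, iova + offset, total - offset);
		offset += lengths[i];
	}
	return 0;
}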
@@ -1963,7 +2009,15 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs)
{
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = sg_dma_len(sg);
+	dma_addr_t iova = sg_dma_address(sg);
+
+	total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, total_length);
+	__free_iova(mapping, iova, total_length);
}

/**
+114 −0
@@ -1633,6 +1633,119 @@ void __init early_paging_init(const struct machine_desc *mdesc)

#endif

#ifdef CONFIG_FORCE_PAGES
/*
 * remap a PMD into pages
 * We split a single pmd here none of this two pmd nonsense
 */
static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, unsigned long pfn,
				const struct mem_type *type)
{
	pte_t *pte, *start_pte;
	pmd_t *base_pmd;

	base_pmd = pmd_offset(
			pud_offset(pgd_offset(&init_mm, addr), addr), addr);

	if (pmd_none(*base_pmd) || pmd_bad(*base_pmd)) {
		start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
#ifndef CONFIG_ARM_LPAE
		/*
		 * Following is needed when new pte is allocated for pmd[1]
		 * cases, which may happen when base (start) address falls
		 * under pmd[1].
		 */
		if (addr & SECTION_SIZE)
			start_pte += pte_index(addr);
#endif
	} else {
		start_pte = pte_offset_kernel(base_pmd, addr);
	}

	pte = start_pte;

	do {
		set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	*pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
	mb(); /* let pmd be programmed */
	flush_pmd_entry(pmd);
	flush_tlb_all();
}

/*
 * It's significantly easier to remap as pages later after all memory is
 * mapped. Everything is sections so all we have to do is split
 */
static void __init remap_pages(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t phys_start = reg->base;
		phys_addr_t phys_end = reg->base + reg->size;
		unsigned long addr = (unsigned long)__va(phys_start);
		unsigned long end = (unsigned long)__va(phys_end);
		pmd_t *pmd = NULL;
		unsigned long next;
		unsigned long pfn = __phys_to_pfn(phys_start);
		bool fixup = false;
		unsigned long saved_start = addr;

		if (phys_start > arm_lowmem_limit)
			break;
		if (phys_end > arm_lowmem_limit)
			end = (unsigned long)__va(arm_lowmem_limit);
		if (phys_start >= phys_end)
			break;

		pmd = pmd_offset(
			pud_offset(pgd_offset(&init_mm, addr), addr), addr);

#ifndef	CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE) {
			fixup = true;
			pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
			pmd++;
		}

		if (end & SECTION_SIZE)
			pmd_empty_section_gap(end);
#endif

		do {
			next = addr + SECTION_SIZE;

			if (pmd_none(*pmd) || pmd_bad(*pmd))
				split_pmd(pmd, addr, next, pfn,
						&mem_types[MT_MEMORY_RWX]);
			pmd++;
			pfn += SECTION_SIZE >> PAGE_SHIFT;

		} while (addr = next, addr < end);

		if (fixup) {
			/*
			 * Put a faulting page table here to avoid detecting no
			 * pmd when accessing an odd section boundary. This
			 * needs to be faulting to help catch errors and avoid
			 * speculation
			 */
			pmd = pmd_off_k(saved_start);
			pmd[0] = pmd[1] & ~1;
		}
	}
}
#else
static void __init remap_pages(void)
{

}
#endif

static void __init early_fixmap_shutdown(void)
{
	int i;
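
In the remap_pages()/split_pmd() block above, the non-LPAE special case keys off addr & SECTION_SIZE: with the classic page-table layout a Linux pmd spans 2 MiB, i.e. two 1 MiB sections, so a memory bank that starts on an odd 1 MiB boundary begins in the second half of its pmd (the "pmd[1]" case the comment mentions) and needs the extra fixup. A tiny illustration of that test (user-space C, example virtual addresses made up):

#include <stdio.h>

#define SECTION_SIZE	(1UL << 20)	/* 1 MiB, classic (non-LPAE) ARM */

int main(void)
{
	unsigned long addrs[] = { 0xc0000000UL, 0xc0100000UL, 0xc0200000UL };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("0x%lx -> pmd[%lu]\n", addrs[i],
		       (addrs[i] & SECTION_SIZE) ? 1UL : 0UL);
	return 0;
}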
@@ -1676,6 +1789,7 @@ void __init paging_init(const struct machine_desc *mdesc)
	memblock_set_current_limit(arm_lowmem_limit);
	dma_contiguous_remap();
	early_fixmap_shutdown();
	remap_pages();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();
+6 −4
@@ -52,11 +52,13 @@ static int change_memory_common(unsigned long addr, int numpages,
	if (!numpages)
		return 0;

	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) {
		if (start < MODULES_VADDR || start >= MODULES_END)
			return -EINVAL;

		if (end < MODULES_VADDR || start >= MODULES_END)
			return -EINVAL;
	}

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;
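
The effect of the hunk above is that, when CONFIG_FORCE_PAGES is enabled, change_memory_common() no longer rejects addresses outside MODULES_VADDR..MODULES_END, so the set_memory_* helpers can in principle be applied to lowmem as well; that is only safe because FORCE_PAGES maps lowmem with 4 KiB pages instead of sections. A hypothetical caller fragment (not taken from this tree; the set_memory_ro() declaration lives in different headers depending on kernel version):

/* Hypothetical: make one lowmem page read-only. Only meaningful with
 * CONFIG_FORCE_PAGES=y, otherwise lowmem is section-mapped. */
static int example_protect_page(void *buf)
{
	unsigned long addr = (unsigned long)buf & PAGE_MASK;

	return set_memory_ro(addr, 1);	/* one 4 KiB page */
}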