Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0a85ff1b authored by Laura Abbott, committed by Saravana Kannan
Browse files

ANDROID: GKI: arm64: Support early fixup for CMA



Although it isn't architecturally required, CMA regions may need
to have attributes changed at runtime. Remap the CMA regions as
pages to allow this to happen.

Change-Id: I7dd7fa150ce69fdf05f8bf6f76a5ae26dd67ff1b
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
Bug: 155522481
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
[saravanak snapshot from commit 79efc458af96 that approximately matches
commit 509553a5afc4c3b554f539a3a6b5fa60bfdce259]
Signed-off-by: Saravana Kannan <saravanak@google.com>
parent 9b76f03f
Loading
Loading
Loading
Loading
+19 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2013,2017-2018 The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_DMA_CONTIGUOUS_H
#define _ASM_DMA_CONTIGUOUS_H

#ifdef __KERNEL__
#ifdef CONFIG_DMA_CMA

#include <linux/types.h>

/*
 * Record a CMA region reserved early in boot so that the arm64 linear-map
 * code can avoid block (section) mappings over it, keeping the region
 * mapped at page granularity for later attribute changes.
 * Called during early memory setup; @base/@size describe the physical range.
 */
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);

#endif
#endif

#endif
+40 −2
Original line number Diff line number Diff line
@@ -32,6 +32,8 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -67,6 +69,40 @@ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

/* One early-reserved CMA region that must stay page-mapped. */
struct dma_contig_early_reserve {
	phys_addr_t base;	/* physical start of the region */
	unsigned long size;	/* region length in bytes */
};

/* Regions registered via dma_contiguous_early_fixup(); bounded by MAX_CMA_AREAS. */
static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
static int dma_mmu_remap_num;	/* number of valid entries in dma_mmu_remap[] */

/*
 * Register a CMA region reserved early in boot so the linear-map setup
 * can keep it mapped at page granularity (see dma_overlap()).
 * Silently drops the region (with an error log) if the fixed-size table
 * is already full.
 */
void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	struct dma_contig_early_reserve *slot;

	if (dma_mmu_remap_num >= ARRAY_SIZE(dma_mmu_remap)) {
		pr_err("ARM64: Not enough slots for DMA fixup reserved regions!\n");
		return;
	}

	/* Claim the next free slot and record the region. */
	slot = &dma_mmu_remap[dma_mmu_remap_num++];
	slot->base = base;
	slot->size = size;
}

/*
 * Return true if the physical range [start, end) intersects any region
 * registered via dma_contiguous_early_fixup(). Used to veto block
 * (section/1G) mappings over CMA so those pages can be remapped later.
 */
static bool dma_overlap(phys_addr_t start, phys_addr_t end)
{
	const struct dma_contig_early_reserve *r = dma_mmu_remap;
	const struct dma_contig_early_reserve *stop = dma_mmu_remap +
		dma_mmu_remap_num;

	for (; r < stop; r++) {
		/* Half-open interval overlap test. */
		if (start < r->base + r->size && r->base < end)
			return true;
	}

	return false;
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
@@ -200,7 +236,8 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
		    !dma_overlap(phys, phys + next - addr)) {
			pmd_set_huge(pmdp, phys, prot);

			/*
@@ -299,7 +336,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
		    (flags & NO_BLOCK_MAPPINGS) == 0 &&
		    !dma_overlap(phys, phys + next - addr)) {
			pud_set_huge(pudp, phys, prot);

			/*