Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ac8999b7, authored by qctecmdr and committed by Gerrit (the friendly Code Review server)
Browse files

Merge "Kconfig: Add menu choice option to reclaim virtual memory"

parents fdb96725 410c74ba
Loading
Loading
Loading
Loading
+29 −0
Original line number Diff line number Diff line
@@ -1627,6 +1627,35 @@ config ARM_MODULE_PLTS
	  Disabling this is usually safe for small single-platform
	  configurations. If unsure, say y.

choice
	prompt "Virtual Memory Reclaim"
	default NO_VM_RECLAIM
	help
	  Select the method of reclaiming virtual memory. Two options
	  are available: NO_VM_RECLAIM and ENABLE_VMALLOC_SAVING.

	  If you are not absolutely sure what you are doing, leave this
	  option alone.

config ENABLE_VMALLOC_SAVING
	bool "Reclaim memory for each subsystem"
	help
	  Enable this config to reclaim the virtual space belonging
	  to any subsystem which is expected to have a lifetime of
	  the entire system. This feature allows lowmem to be non-
	  contiguous.

config NO_VM_RECLAIM
	bool "Do not reclaim memory"
	help
	  Do not reclaim any memory. This might result in less lowmem
	  and wasting some virtual memory space which could otherwise
	  be reclaimed by selecting ENABLE_VMALLOC_SAVING above.

endchoice

config FORCE_MAX_ZONEORDER
	int "Maximum zone order"
	default "12" if SOC_AM33XX
+72 −27
Original line number Diff line number Diff line
@@ -111,6 +111,30 @@ static void __dma_page_cpu_to_dev(struct page *, unsigned long,
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/*
 * Pick the kernel page protection for a DMA mapping: a non-coherent
 * device, or an explicit DMA_ATTR_WRITE_COMBINE request, gets a
 * writecombine protection; otherwise the given protection is kept.
 */
static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
					bool coherent);

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	bool writecombine = !coherent || (attrs & DMA_ATTR_WRITE_COMBINE);

	return writecombine ? pgprot_writecombine(prot) : prot;
}

/*
 * Resolve the effective coherency for a DMA operation.  An explicit
 * DMA_ATTR_FORCE_COHERENT / DMA_ATTR_FORCE_NON_COHERENT attribute wins;
 * next a device marked coherent in archdata; otherwise the caller's
 * default (is_coherent) is returned unchanged.
 */
static bool is_dma_coherent(struct device *dev, unsigned long attrs,
			    bool is_coherent)
{
	if (attrs & DMA_ATTR_FORCE_COHERENT)
		return true;

	if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
		return false;

	if (dev->archdata.dma_coherent)
		return true;

	return is_coherent;
}

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -603,17 +627,6 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	/* An explicit write-combine request takes priority. */
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);

	/* if non-consistent just pass back what was given */
	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return prot;

	return pgprot_dmacoherent(prot);
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
@@ -788,7 +801,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
/*
 * arm_dma_alloc - allocate a buffer for streaming DMA via __dma_alloc().
 *
 * NOTE(review): the rendered diff carried both the old two-argument and
 * the new three-argument __get_dma_pgprot() calls, which redefined
 * 'prot'; only the post-patch three-argument call is kept here.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	/* false: treat the mapping as non-coherent for pgprot selection. */
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}
@@ -838,7 +851,8 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
						false);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

@@ -1365,16 +1379,18 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
			int coherent_flag)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;
	int prot = IOMMU_READ | IOMMU_WRITE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;
	prot |= coherent_flag ? IOMMU_CACHE : 0;

	iova = dma_addr;
	for (i = 0; i < count; ) {
@@ -1389,8 +1405,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		iova += len;
@@ -1455,7 +1470,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	*handle = __iommu_create_mapping(dev, &page, size, coherent_flag);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

@@ -1480,9 +1495,9 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;
	pgprot_t prot;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);
@@ -1491,6 +1506,8 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	coherent_flag = is_dma_coherent(dev, attrs, coherent_flag);
	prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent_flag);
	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
@@ -1504,7 +1521,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	*handle = __iommu_create_mapping(dev, pages, size, coherent_flag);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

@@ -1561,7 +1578,8 @@ static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					is_dma_coherent(dev, attrs, NORMAL));

	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}
@@ -1604,7 +1622,8 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
void arm_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs,
				is_dma_coherent(dev, attrs, NORMAL));
}

void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
@@ -1759,11 +1778,18 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
	unsigned int total_length = 0, current_offset = 0;
	dma_addr_t iova;
	int prot = __dma_info_to_prot(dir, attrs);
	bool coherent;

	for_each_sg(sg, s, nents, i)
		total_length += s->length;

	iova = __alloc_iova(mapping, total_length);
	if (iova == DMA_MAPPING_ERROR)
		return 0;

	coherent = of_dma_is_coherent(dev->of_node);
	prot |= is_dma_coherent(dev, attrs, coherent) ? IOMMU_CACHE : 0;

	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
	if (ret != total_length) {
		__free_iova(mapping, iova, total_length);
@@ -1853,6 +1879,12 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
{
	struct scatterlist *s;
	int i;
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = sg_dma_address(sg);
	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);

	if (iova_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -1872,6 +1904,13 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
	struct scatterlist *s;
	int i;

	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = sg_dma_address(sg);
	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);

	if (iova_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
@@ -1930,7 +1969,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
	if (!is_dma_coherent(dev, attrs, false) &&
	      !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
@@ -1978,7 +2018,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
	if (!iova)
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
	if (!(is_dma_coherent(dev, attrs, false) ||
	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
@@ -2049,7 +2090,9 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;
	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);

	if (!iova_coherent)
		__dma_page_dev_to_cpu(page, offset, size, dir);
}

@@ -2060,7 +2103,9 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;
	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);

	if (!iova_coherent)
		__dma_page_cpu_to_dev(page, offset, size, dir);
}

@@ -2586,7 +2631,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   __get_dma_pgprot(attrs, PAGE_KERNEL, false), false,
			   attrs, __builtin_return_address(0));
}