Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9740ba42 authored by Laura Abbott
Browse files

cma: use pfn instead of pages for argument passing



The CMA code is generic enough that it can be expanded out to track
regions of memory that aren't officially managed by the regular page
allocator. This memory can't be referenced via struct page. Change the
CMA APIs to track using pfn for allocation/free instead. The pfn can
be converted to a struct page as needed elsewhere.

Change-Id: I5ac3fa5e2169b2101a738177f1654faa401f7604
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
parent 2d37de8c
Loading
Loading
Loading
Loading
+13 −7
Original line number Diff line number Diff line
@@ -580,13 +580,16 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	unsigned long pfn;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
	pfn = dma_alloc_from_contiguous(dev, count, order);
	if (!pfn)
		return NULL;

	page = pfn_to_page(pfn);

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
@@ -601,7 +604,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
			ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
						caller);
			if (!ptr) {
				dma_release_from_contiguous(dev, page, count);
				dma_release_from_contiguous(dev, pfn, count);
				return NULL;
			}
		}
@@ -620,7 +623,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
		__dma_free_remap(cpu_addr, size, true);
	else
		__dma_remap(page, size, pgprot_kernel, false);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
	dma_release_from_contiguous(dev, page_to_pfn(page), size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
@@ -1127,11 +1130,14 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
	{
		unsigned long order = get_order(size);
		struct page *page;
		unsigned long pfn;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
		pfn = dma_alloc_from_contiguous(dev, count, order);
		if (!pfn)
			goto error;

		pfn = pfn_to_page(pfn);

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
@@ -1186,7 +1192,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
		dma_release_from_contiguous(dev, page_to_pfn(pages[0]), count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
+9 −14
Original line number Diff line number Diff line
@@ -494,17 +494,16 @@ phys_addr_t cma_get_base(struct device *dev)
 * global one. Requires architecture specific get_dev_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	unsigned long mask, pfn = 0, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret = 0;
	int tries = 0;

	if (!cma || !cma->count)
		return NULL;
		return 0;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;
@@ -513,7 +512,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
		 count, align);

	if (!count)
		return NULL;
		return 0;

	mask = (1 << align) - 1;

@@ -530,7 +529,6 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
			ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
@@ -545,8 +543,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
	pr_debug("%s(): returned %lx\n", __func__, pfn);
	return pfn;
}

/**
@@ -559,18 +557,15 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
bool dma_release_from_contiguous(struct device *dev, unsigned long pfn,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
	if (!cma || !pfn)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);
	pr_debug("%s(pfn %lx)\n", __func__, pfn);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;
+5 −5
Original line number Diff line number Diff line
@@ -113,9 +113,9 @@ static inline int dma_declare_contiguous_reserved(struct device *dev,
	return ret;
}

struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int order);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
bool dma_release_from_contiguous(struct device *dev, unsigned long pfn,
				 int count);

#else
@@ -132,14 +132,14 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}

static inline
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned long dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int order)
{
	return NULL;
	return 0;
}

static inline
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
bool dma_release_from_contiguous(struct device *dev, unsigned long pfn,
				 int count)
{
	return false;