Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2275bf3 authored by Neeti Desai, committed by Patrick Daly
Browse files

arm64: dma-mapping: Split large pages when allocating in atomic context



In atomic context, gen_pool_alloc returns a single physically contiguous
allocation large enough to accommodate the requested size. However
__iommu_create_mapping always maps pages assuming they are of size 4K.
Thus only the first 4K of the buffer is mapped and a translation fault
is generated during an unmap.

Fix this by splitting the larger pages into 4K pages.

Change-Id: Ifcbe29477ad210204028486bd011470fe8b50852
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent 040a288a
Loading
Loading
Loading
Loading
+20 −2
Original line number Diff line number Diff line
@@ -1366,20 +1366,38 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;
	void *addr;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);

	if (!pages)
		return NULL;

	addr = __alloc_from_pool(size, &page, gfp);
	if (!addr)
		return NULL;
		goto err_free;

	*handle = __iommu_create_mapping(dev, &page, size);
	for (i = 0; i < count ; i++)
		pages[i] = page + i;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	kvfree(pages);
	return addr;

err_mapping:
	__free_from_pool(addr, size);
err_free:
	kvfree(pages);
	return NULL;
}