
Commit c080e26e authored by Marek Szyprowski

x86: dma-mapping: fix broken allocation when dma_mask has been provided



Commit 0a2b9a6e ("X86: integrate CMA with DMA-mapping subsystem")
broke memory allocation with dma_mask. This patch fixes a possible kernel
oops caused by the page variable not being reset when jumping to the 'again' label.

Reported-by: Konrad Rzeszutek Wilk <konrad@darnok.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
parent fdb11173
+2 −1
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
-	struct page *page = NULL;
+	struct page *page;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t addr;
 
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 	flag |= __GFP_ZERO;
 again:
+	page = NULL;
 	if (!(flag & GFP_ATOMIC))
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
 	if (!page)
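
The sketch below is a minimal, self-contained userspace illustration of the failure mode the commit message describes, not the kernel code itself: when the first allocation lands above the DMA mask it is freed and control jumps back to 'again', but because 'page' is only NULLed at its declaration, the fallback allocation is skipped on the retry and the freed page is handed back. The helper names (try_cma_alloc, fits_dma_mask) and the boolean "atomic" flag are stand-ins invented for the example.

/*
 * Userspace sketch of the stale-pointer retry bug; helper names are
 * stand-ins for dma_alloc_from_contiguous() and the dma_mask check.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static void *try_cma_alloc(bool atomic)
{
	/* CMA is not usable for atomic allocations in this sketch. */
	return atomic ? NULL : malloc(64);
}

static bool fits_dma_mask(int attempt)
{
	/* Pretend only the retried allocation lands below the DMA mask. */
	return attempt > 0;
}

/* Mirrors the pre-fix control flow: 'page' is set to NULL only once. */
static void *alloc_coherent_buggy(bool atomic)
{
	void *page = NULL;
	int attempt = 0;

again:
	if (!atomic)
		page = try_cma_alloc(atomic);
	if (!page)			/* skipped on retry: page still holds the freed pointer */
		page = malloc(64);	/* stand-in for the generic page allocator */

	if (!fits_dma_mask(attempt)) {
		free(page);		/* page now dangles */
		attempt++;
		goto again;		/* the fix adds: page = NULL; right after 'again:' */
	}
	return page;			/* without the fix this can be the freed pointer */
}

int main(void)
{
	void *p = alloc_coherent_buggy(true);
	printf("got %p - without resetting 'page' this is freed memory\n", p);
	return 0;
}

With the one-line fix, resetting 'page' immediately after the 'again:' label forces the fallback allocation to run on every retry instead of reusing the pointer that was just freed.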