
Commit 3b839a57 authored by Joerg Roedel

iommu/amd: Add support for contiguous dma allocator



Add code to allocate memory from the contiguous memory
allocator to support coherent allocations larger than 8MB.

Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
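
For context: the change follows a common two-step pattern for large coherent buffers. Try the regular page allocator first, and fall back to the Contiguous Memory Allocator (CMA) only when that fails and the caller is allowed to sleep; this is how the allocations beyond 8MB mentioned above get satisfied. Below is a minimal sketch of that pattern, written against the kernel APIs of this era (__GFP_WAIT, dma_alloc_from_contiguous() and dma_release_from_contiguous() from <linux/dma-contiguous.h>); the helper names cma_backed_alloc() and cma_backed_free() are illustrative only, not part of this patch.

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative sketch only: allocate pages for a large coherent buffer,
 * falling back to CMA when the buddy allocator cannot satisfy the order. */
static struct page *cma_backed_alloc(struct device *dev, size_t size, gfp_t flag)
{
	struct page *page;

	size = PAGE_ALIGN(size);

	/* First try the buddy allocator; suppress its failure warning,
	 * since falling back to CMA is expected for big allocations. */
	page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
	if (!page) {
		/* CMA allocation may sleep, so atomic callers must bail out. */
		if (!(flag & __GFP_WAIT))
			return NULL;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
	}

	return page;
}

static void cma_backed_free(struct device *dev, struct page *page, size_t size)
{
	size = PAGE_ALIGN(size);

	/* dma_release_from_contiguous() returns false when the pages did
	 * not come from the CMA area; such pages go back to the buddy
	 * allocator instead. */
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
}

On the free side, dma_release_from_contiguous() returning false for non-CMA pages is what keeps the two allocation paths symmetric; the hunks in free_coherent() below follow the same shape.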
parent 5fc872c7
+28 −16
@@ -33,6 +33,7 @@
 #include <linux/export.h>
 #include <linux/irq.h>
 #include <linux/msi.h>
+#include <linux/dma-contiguous.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -2913,37 +2914,42 @@ static void *alloc_coherent(struct device *dev, size_t size,
 			    dma_addr_t *dma_addr, gfp_t flag,
 			    struct dma_attrs *attrs)
 {
-	unsigned long flags;
-	void *virt_addr;
-	struct protection_domain *domain;
-	phys_addr_t paddr;
 	u64 dma_mask = dev->coherent_dma_mask;
+	struct protection_domain *domain;
+	unsigned long flags;
+	struct page *page;
 
 	INC_STATS_COUNTER(cnt_alloc_coherent);
 
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL) {
-		virt_addr = (void *)__get_free_pages(flag, get_order(size));
-		*dma_addr = __pa(virt_addr);
-		return virt_addr;
+		page = alloc_pages(flag, get_order(size));
+		*dma_addr = page_to_phys(page);
+		return page_address(page);
 	} else if (IS_ERR(domain))
 		return NULL;
 
+	size	  = PAGE_ALIGN(size);
 	dma_mask  = dev->coherent_dma_mask;
 	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 
-	virt_addr = (void *)__get_free_pages(flag, get_order(size));
-	if (!virt_addr)
-		return NULL;
+	page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
+	if (!page) {
+		if (!(flag & __GFP_WAIT))
+			return NULL;
 
-	paddr = virt_to_phys(virt_addr);
+		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+						 get_order(size));
+		if (!page)
+			return NULL;
+	}
 
 	if (!dma_mask)
 		dma_mask = *dev->dma_mask;
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	*dma_addr = __map_single(dev, domain->priv, paddr,
+	*dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
 	if (*dma_addr == DMA_ERROR_CODE) {
@@ -2955,11 +2961,12 @@ static void *alloc_coherent(struct device *dev, size_t size,

 	spin_unlock_irqrestore(&domain->lock, flags);
 
-	return virt_addr;
+	return page_address(page);
 
 out_free:
 
-	free_pages((unsigned long)virt_addr, get_order(size));
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, get_order(size));
 
 	return NULL;
 }
@@ -2971,11 +2978,15 @@ static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr,
 			  struct dma_attrs *attrs)
 {
-	unsigned long flags;
 	struct protection_domain *domain;
+	unsigned long flags;
+	struct page *page;
 
 	INC_STATS_COUNTER(cnt_free_coherent);
 
+	page = virt_to_page(virt_addr);
+	size = PAGE_ALIGN(size);
+
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		goto free_mem;
@@ -2989,7 +3000,8 @@ static void free_coherent(struct device *dev, size_t size,
 	spin_unlock_irqrestore(&domain->lock, flags);
 
 free_mem:
-	free_pages((unsigned long)virt_addr, get_order(size));
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, get_order(size));
 }
 
 /*