
Commit 9051aa02 authored by David Woodhouse

intel-iommu: Combine domain_pfn_mapping() and domain_sg_mapping()

parent e1605495
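
The change collapses the scatterlist and contiguous-PFN mapping paths into one worker, __domain_mapping(): a non-NULL scatterlist selects the sg path, while sg == NULL maps nr_pages physically contiguous page frames starting at phys_pfn. The former entry points survive as inline wrappers, so callers are untouched. Below is a minimal standalone sketch of that worker-plus-wrappers shape; the stub scatterlist, the void *domain parameter, and the printf bodies are illustrative stand-ins, not the kernel code.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's struct scatterlist, cut down to the two
 * fields the real code consults when sizing a segment. */
struct scatterlist {
	unsigned int offset;
	unsigned int length;
};

/* One worker serves both cases: sg != NULL walks a scatterlist,
 * sg == NULL maps nr_pages contiguous frames starting at phys_pfn. */
static int __domain_mapping(void *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	(void)domain;
	(void)prot;
	if (sg)
		printf("sg path: %lu page(s) at iov_pfn %#lx\n",
		       nr_pages, iov_pfn);
	else
		printf("contiguous path: %lu page(s) from phys_pfn %#lx\n",
		       nr_pages, phys_pfn);
	return 0;
}

/* The old entry points become thin wrappers, mirroring the diff below,
 * so every existing caller keeps its signature. */
static inline int domain_sg_mapping(void *domain, unsigned long iov_pfn,
				    struct scatterlist *sg,
				    unsigned long nr_pages, int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(void *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn,
				     unsigned long nr_pages, int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

int main(void)
{
	struct scatterlist sg = { .offset = 0, .length = 4096 };

	domain_sg_mapping(NULL, 0x100, &sg, 1, 3);
	domain_pfn_mapping(NULL, 0x200, 0x2000, 4, 3);
	return 0;
}

Using NULL as the discriminator costs one test at the top of the worker and removes a near-duplicate page-table walk; the inline wrappers make the consolidation invisible at every call site.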
+22 −40
@@ -1635,14 +1635,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					     tmp->devfn);
 }
 
-static int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-			     struct scatterlist *sg, unsigned long nr_pages,
-			     int prot)
+static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			    struct scatterlist *sg, unsigned long phys_pfn,
+			    unsigned long nr_pages, int prot)
 {
 	struct dma_pte *first_pte = NULL, *pte = NULL;
-	uint64_t pteval;
+	phys_addr_t uninitialized_var(pteval);
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-	unsigned long sg_res = 0;
+	unsigned long sg_res;
 
 	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
 
@@ -1651,6 +1651,13 @@ static int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
 	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 
+	if (sg)
+		sg_res = 0;
+	else {
+		sg_res = nr_pages + 1;
+		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+	}
+
 	while (nr_pages--) {
 		if (!sg_res) {
 			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
@@ -1685,43 +1692,18 @@ static int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	return 0;
 }
 
-static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-			      unsigned long phys_pfn, unsigned long nr_pages,
-			      int prot)
+static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+				    struct scatterlist *sg, unsigned long nr_pages,
+				    int prot)
 {
-	struct dma_pte *first_pte = NULL, *pte = NULL;
-	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-
-	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
-
-	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
-		return -EINVAL;
-
-	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
-
-	while (nr_pages--) {
-		if (!pte) {
-			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
-			if (!pte)
-				return -ENOMEM;
-		}
-		/* We don't need lock here, nobody else
-		 * touches the iova range
-		 */
-		BUG_ON(dma_pte_addr(pte));
-		pte->val = (phys_pfn << VTD_PAGE_SHIFT) | prot;
-		pte++;
-		if (!nr_pages ||
-		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
-		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
-			domain_flush_cache(domain, first_pte,
-					   (void *)pte - (void *)first_pte);
-			pte = NULL;
-		}
-		iov_pfn++;
-		phys_pfn++;
+	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
 }
-	return 0;
+
+static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+				     unsigned long phys_pfn, unsigned long nr_pages,
+				     int prot)
+{
+	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
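
One detail of the new contiguous path is easy to miss: sg_res is primed to nr_pages + 1, not nr_pages. Judging from the loop in the middle hunk, sg_res is consumed at most once per mapped page, so a starting value one larger than the iteration count can never reach zero; the "if (!sg_res)" refill branch, the only code that dereferences sg, is therefore dead when sg is NULL, and pteval is computed once up front instead. That is also why pteval is now declared with uninitialized_var(), the kernel macro that silences a false "may be used uninitialized" warning: on the scatterlist path the compiler cannot see that the first loop iteration always assigns it. A standalone demonstration of the counter invariant (plain C, not kernel code; the loop body is reduced to the bookkeeping):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long nr_pages = 5;
	/* The priming from the patch: one more unit than the loop,
	 * which runs exactly nr_pages times, can consume. */
	unsigned long sg_res = nr_pages + 1;

	while (nr_pages--) {
		/* In __domain_mapping() this is where sg would be read
		 * to refill pteval; the priming makes it unreachable. */
		assert(sg_res != 0);
		sg_res--;	/* one page mapped, one unit consumed */
	}

	/* Exactly one unit is left over, so !sg_res never held. */
	printf("sg_res remaining: %lu\n", sg_res);
	return 0;
}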