Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4f3b7e93 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: camera: smmu: Use force guard page for IPE"

parents e7999ae0 5ac11cac
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@ struct dma_iommu_mapping {
	dma_addr_t		base;
	u32			min_iova_align;
	struct page		*guard_page;
	u32			force_guard_page_len;

	struct dma_fast_smmu_mapping *fast;
};
+24 −12
Original line number Diff line number Diff line
@@ -1183,7 +1183,8 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,

	size = PAGE_ALIGN(size);
	if (mapping->min_iova_align)
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
		guard_len = ALIGN(size + mapping->force_guard_page_len,
				  mapping->min_iova_align) - size;
	else
		guard_len = 0;

@@ -1231,12 +1232,14 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,

	addr = addr & PAGE_MASK;
	size = PAGE_ALIGN(size);
	if (mapping->min_iova_align) {
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
		iommu_unmap(mapping->domain, addr + size, guard_len);
	} else {
	if (mapping->min_iova_align)
		guard_len = ALIGN(size + mapping->force_guard_page_len,
				  mapping->min_iova_align) - size;
	else
		guard_len = 0;
	}

	if (guard_len)
		iommu_unmap(mapping->domain, addr + size, guard_len);

	start = (addr - mapping->base) >> PAGE_SHIFT;
	count = (size + guard_len) >> PAGE_SHIFT;
@@ -1987,21 +1990,30 @@ bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
	int vmid = VMID_HLOS;
	int min_iova_align = 0;
	int force_iova_guard_page = 0;

	iommu_domain_get_attr(mapping->domain,
			DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
			&min_iova_align);
	iommu_domain_get_attr(mapping->domain,
			DOMAIN_ATTR_SECURE_VMID, &vmid);
	iommu_domain_get_attr(mapping->domain,
			      DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
			      &force_iova_guard_page);

	if (vmid >= VMID_LAST || vmid < 0)
		vmid = VMID_HLOS;

	if (min_iova_align) {
		mapping->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
		mapping->guard_page = arm_smmu_errata_get_guard_page(vmid);
	mapping->min_iova_align = (min_iova_align) ? ARM_SMMU_MIN_IOVA_ALIGN :
		PAGE_SIZE;

	if (force_iova_guard_page)
		mapping->force_guard_page_len = PAGE_SIZE;

	mapping->guard_page =
		arm_smmu_errata_get_guard_page(vmid);
	if (!mapping->guard_page)
		return -ENOMEM;
	}

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
							__GFP_NORETRY);
+29 −0
Original line number Diff line number Diff line
@@ -542,6 +542,7 @@ struct arm_smmu_domain {
	bool				qsmmuv500_errata1_init;
	bool				qsmmuv500_errata1_client;
	bool				qsmmuv500_errata2_min_align;
	bool				is_force_guard_page;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -3245,6 +3246,12 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
		*((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
		ret = 0;
		break;
	case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE:
		*((int *)data) = !!(smmu_domain->attributes
			& (1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE));
		ret = 0;
		break;

	default:
		ret = -ENODEV;
		break;
@@ -3447,6 +3454,28 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				1 << DOMAIN_ATTR_CB_STALL_DISABLE;
		ret = 0;
		break;

	case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE: {
		int force_iova_guard_page = *((int *)data);

		if (smmu_domain->smmu != NULL) {
			dev_err(smmu_domain->smmu->dev,
			  "cannot change force guard page attribute while attached\n");
			ret = -EBUSY;
			break;
		}

		if (force_iova_guard_page)
			smmu_domain->attributes |=
				1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE;
		else
			smmu_domain->attributes &=
				~(1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE);

		ret = 0;
		break;
	}

	default:
		ret = -ENODEV;
	}
+27 −12
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@ struct iommu_dma_cookie {
	spinlock_t		msi_lock;
	u32			min_iova_align;
	struct page		*guard_page;
	u32			force_guard_page_len;
};

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
@@ -130,20 +131,31 @@ static int iommu_dma_arm_smmu_errata_init(struct iommu_domain *domain)
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	int vmid = VMID_HLOS;
	int min_iova_align = 0;
	int force_iova_guard_page = 0;


	iommu_domain_get_attr(domain,
			DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
			&min_iova_align);
	iommu_domain_get_attr(domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
	iommu_domain_get_attr(domain,
			      DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
			      &force_iova_guard_page);

	if (vmid >= VMID_LAST || vmid < 0)
		vmid = VMID_HLOS;

	if (min_iova_align) {
		cookie->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
		cookie->guard_page = arm_smmu_errata_get_guard_page(vmid);
	cookie->min_iova_align = (min_iova_align) ? ARM_SMMU_MIN_IOVA_ALIGN :
		PAGE_SIZE;

	if (force_iova_guard_page)
		cookie->force_guard_page_len = PAGE_SIZE;

	cookie->guard_page =
		arm_smmu_errata_get_guard_page(vmid);
	if (!cookie->guard_page)
		return -ENOMEM;
	}

	return 0;
}

@@ -244,7 +256,8 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
	dma_addr_t ret_iova;

	if (cookie->min_iova_align)
		guard_len = ALIGN(size, cookie->min_iova_align) - size;
		guard_len = ALIGN(size + cookie->force_guard_page_len,
				  cookie->min_iova_align) - size;
	else
		guard_len = 0;
	iova_len = (size + guard_len) >> shift;
@@ -290,12 +303,14 @@ static void iommu_dma_free_iova(struct iommu_domain *domain,
	unsigned long shift = iova_shift(iovad);
	unsigned long guard_len;

	if (cookie->min_iova_align) {
		guard_len = ALIGN(size, cookie->min_iova_align) - size;
		iommu_unmap(domain, iova + size, guard_len);
	} else {
	if (cookie->min_iova_align)
		guard_len = ALIGN(size + cookie->force_guard_page_len,
				  cookie->min_iova_align) - size;
	else
		guard_len = 0;
	}

	if (guard_len)
		iommu_unmap(domain, iova + size, guard_len);

	free_iova_fast(iovad, iova >> shift, (size + guard_len) >> shift);
}
+28 −14
Original line number Diff line number Diff line
@@ -163,7 +163,8 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
	dma_addr_t iova;

	if (mapping->min_iova_align)
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
		guard_len = ALIGN(size + mapping->force_guard_page_len,
				  mapping->min_iova_align) - size;
	else
		guard_len = 0;

@@ -311,12 +312,15 @@ static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
	unsigned long nbits;
	unsigned long guard_len;

	if (mapping->min_iova_align) {
		guard_len = ALIGN(size, mapping->min_iova_align) - size;
		iommu_unmap(mapping->domain, iova + size, guard_len);
	} else {
	if (mapping->min_iova_align)
		guard_len = ALIGN(size + mapping->force_guard_page_len,
				  mapping->min_iova_align) - size;
	else
		guard_len = 0;
	}

	if (guard_len)
		iommu_unmap(mapping->domain, iova + size, guard_len);

	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;


@@ -898,20 +902,30 @@ static int fast_smmu_errata_init(struct dma_iommu_mapping *mapping)
	struct dma_fast_smmu_mapping *fast = mapping->fast;
	int vmid = VMID_HLOS;
	int min_iova_align = 0;
	int force_iova_guard_page = 0;

	iommu_domain_get_attr(mapping->domain,
			      DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
			      &min_iova_align);
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
	iommu_domain_get_attr(mapping->domain,
			      DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
			      &force_iova_guard_page);

	if (vmid >= VMID_LAST || vmid < 0)
		vmid = VMID_HLOS;

	if (min_iova_align) {
		fast->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
		fast->guard_page = arm_smmu_errata_get_guard_page(vmid);
	fast->min_iova_align = (min_iova_align) ?  ARM_SMMU_MIN_IOVA_ALIGN :
		PAGE_SIZE;

	if (force_iova_guard_page)
		fast->force_guard_page_len = PAGE_SIZE;

	fast->guard_page =
		arm_smmu_errata_get_guard_page(vmid);
	if (!fast->guard_page)
		return -ENOMEM;
	}

	return 0;
}

Loading