Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 45b0d782 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "iommu: Remove support for buffer alignment to ARM_SMMU_MIN_IOVA_ALIGN"

parents e05724d4 df19c6e3
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -23,8 +23,6 @@ struct dma_iommu_mapping {
	void			*bitmap;
	size_t			bits;
	dma_addr_t		base;
	u32			min_iova_align;
	struct page		*guard_page;

	struct dma_fast_smmu_mapping *fast;
};
+0 −2
Original line number Diff line number Diff line
@@ -39,8 +39,6 @@
#include <linux/of_address.h>
#include <linux/dma-mapping-fast.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/arm-smmu-errata.h>
#include <soc/qcom/secure_buffer.h>


static int swiotlb __ro_after_init;
+1 −1
Original line number Diff line number Diff line
@@ -17,7 +17,7 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-errata.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o

drivers/iommu/arm-smmu-errata.c

deleted 100644 → 0
+0 −44
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/arm-smmu-errata.h>

static struct page *guard_pages[VMID_LAST];
static DEFINE_MUTEX(guard_page_lock);

struct page *arm_smmu_errata_get_guard_page(int vmid)
{
	struct page *page;
	int ret;
	int source_vm = VMID_HLOS;
	int dest_vm = vmid;
	int dest_perm = PERM_READ | PERM_WRITE | PERM_EXEC;
	size_t size = ARM_SMMU_MIN_IOVA_ALIGN;

	mutex_lock(&guard_page_lock);
	page = guard_pages[vmid];
	if (page)
		goto out;

	page = alloc_pages(GFP_KERNEL, get_order(size));
	if (!page)
		goto out;

	if (vmid != VMID_HLOS) {
		ret = hyp_assign_phys(page_to_phys(page), PAGE_ALIGN(size),
				&source_vm, 1,
				&dest_vm, &dest_perm, 1);
		if (ret && (ret != -EIO)) {
			__free_pages(page, get_order(size));
			page = NULL;
		}
	}
	guard_pages[vmid] = page;
out:
	mutex_unlock(&guard_page_lock);
	return page;
}
+0 −20
Original line number Diff line number Diff line
@@ -248,7 +248,6 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
#define ARM_SMMU_OPT_STATIC_CB		(1 << 6)
#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 7)
#define ARM_SMMU_OPT_MIN_IOVA_ALIGN	(1 << 8)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;
@@ -367,7 +366,6 @@ struct arm_smmu_domain {
	/* nonsecure pool protected by pgtbl_lock */
	struct list_head		nonsecure_pool;
	struct iommu_domain		domain;
	bool				qsmmuv500_errata1_min_iova_align;
};

struct arm_smmu_option_prop {
@@ -387,7 +385,6 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
	{ ARM_SMMU_OPT_MIN_IOVA_ALIGN, "qcom,min-iova-align" },
	{ 0, NULL},
};

@@ -3373,10 +3370,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
			& (1 << DOMAIN_ATTR_NO_CFRE));
		ret = 0;
		break;
	case DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN:
		*((int *)data) = smmu_domain->qsmmuv500_errata1_min_iova_align;
		ret = 0;
		break;
	default:
		ret = -ENODEV;
		break;
@@ -4888,10 +4881,6 @@ module_exit(arm_smmu_exit);

#define TBU_DBG_TIMEOUT_US		100

#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK	0x3
#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT	0x8


struct actlr_setting {
	struct arm_smmu_smr smr;
	u32 actlr;
@@ -5322,15 +5311,6 @@ static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,

	writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);

	/*
	 * Prefetch only works properly if the start and end of all
	 * buffers in the page table are aligned to ARM_SMMU_MIN_IOVA_ALIGN.
	 */
	if (((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
			QSMMUV500_ACTLR_DEEP_PREFETCH_MASK) &&
				  (smmu->options & ARM_SMMU_OPT_MIN_IOVA_ALIGN))
		smmu_domain->qsmmuv500_errata1_min_iova_align = true;

	/*
	 * Flush the context bank after modifying ACTLR to ensure there
	 * are no cache entries with stale state
Loading