Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0c3a23a4 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: add support for SMMU fastpath"

parents 9d291084 ce469087
Loading
Loading
Loading
Loading
+30 −0
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@ Optional:
- qcom,arm-smmu: SMMU is present and ARM SMMU driver is used
- qcom,msm-smmu: SMMU is present and QSMMU driver is used
- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
- qcom,smmu-fast-map: Boolean context flag to set SMMU to fastpath mode
- ipa_smmu_ap: AP general purpose SMMU device
	compatible "qcom,ipa-smmu-ap-cb"
- ipa_smmu_wlan: WDI SMMU device
@@ -102,6 +103,18 @@ Optional properties:
- clock-names: This property shall contain the clock input names used
    by driver in same order as the clocks property.This should be "iface_clk"

IPA SMMU sub nodes

-compatible: "qcom,ipa-smmu-ap-cb" - represents the AP context bank.

-compatible: "qcom,ipa-smmu-wlan-cb" - represents IPA WLAN context bank.

-compatible: "qcom,ipa-smmu-uc-cb" - represents IPA uC context bank (for uC
					offload scenarios).
- iommus : the phandle and stream IDs for the SMMU used by this root

- qcom,iova-mapping: specifies the start address and size of iova space.

IPA SMP2P sub nodes

-compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
@@ -174,4 +187,21 @@ qcom,ipa@fd4c0000 {
		compatible = "qcom,smp2pgpio-map-ipa-1-in";
		gpios = <&smp2pgpio_ipa_1_in 0 0>;
	};

	ipa_smmu_ap: ipa_smmu_ap {
		compatible = "qcom,ipa-smmu-ap-cb";
		iommus = <&anoc2_smmu 0x30>;
		qcom,iova-mapping = <0x10000000 0x40000000>;
	};

	ipa_smmu_wlan: ipa_smmu_wlan {
		compatible = "qcom,ipa-smmu-wlan-cb";
		iommus = <&anoc2_smmu 0x31>;
	};

	ipa_smmu_uc: ipa_smmu_uc {
		compatible = "qcom,ipa-smmu-uc-cb";
		iommus = <&anoc2_smmu 0x32>;
		qcom,iova-mapping = <0x40000000 0x20000000>;
	};
};
+205 −49
Original line number Diff line number Diff line
@@ -200,10 +200,15 @@ static struct clk *ipa_inactivity_clk;
struct ipa_context *ipa_ctx;
static struct device *master_dev;
struct platform_device *ipa_pdev;
/*
 * SMMU configuration discovered from device tree at probe time and
 * consulted by the context-bank probe routines. This struct replaces the
 * former loose file-scope booleans (smmu_present, arm_smmu,
 * smmu_disable_htw, smmu_s1_bypass); keeping both sets around is a
 * merge artifact and shadows state in two places.
 */
static struct {
	bool present;		/* set once the AP context bank attaches */
	bool arm_smmu;		/* ARM SMMU driver in use (vs. QSMMU) */
	bool disable_htw;	/* qcom,smmu-disable-htw */
	bool fast_map;		/* qcom,smmu-fast-map */
	bool s1_bypass;		/* qcom,smmu-s1-bypass */
	u32 ipa_base;		/* IPA wrapper base, for CB identity mapping */
	u32 ipa_size;		/* IPA wrapper size, for CB identity mapping */
} smmu_info;

static char *active_clients_table_buf;

@@ -382,16 +387,24 @@ struct iommu_domain *ipa2_get_smmu_domain(void)
	return NULL;
}

struct iommu_domain *ipa_get_uc_smmu_domain(void)
struct iommu_domain *ipa2_get_uc_smmu_domain(void)
{
	struct iommu_domain *domain = NULL;

	if (smmu_cb[IPA_SMMU_CB_UC].valid)
		domain = smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
	else
		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
}

/**
 * ipa2_get_wlan_smmu_domain() - Return the IOMMU domain of the WLAN
 * context bank
 *
 * Return: the WLAN context bank's domain, or NULL (with an error log) if
 * the WLAN context bank has not been successfully probed yet.
 *
 * Fix: the block contained both `return domain;` (referencing a variable
 * declared in a different function — a compile error) and `return NULL;`;
 * the former is residue of a removed diff line and is dropped.
 */
struct iommu_domain *ipa2_get_wlan_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;

	IPAERR("CB not valid\n");

	return NULL;
}

struct device *ipa2_get_dma_dev(void)
@@ -399,6 +412,17 @@ struct device *ipa2_get_dma_dev(void)
	return ipa_ctx->pdev;
}

/**
 * ipa2_get_smmu_ctx() - Return the AP SMMU context bank
 *
 * Returns the context-bank structure for the AP general-purpose SMMU
 * device (IPA_SMMU_CB_AP). Callers must check ->valid before relying on
 * its mapping/domain, since this returns the slot even before the AP CB
 * probe has run.
 *
 * Return value: pointer to the AP smmu context bank structure
 */
struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_AP];
}


/**
 * ipa2_get_wlan_smmu_ctx()- Return the wlan smmu context
 *
@@ -2701,7 +2725,7 @@ static int ipa_get_clks(struct device *dev)
		return PTR_ERR(ipa_clk);
	}

	if (smmu_present && arm_smmu) {
	if (smmu_info.present && smmu_info.arm_smmu) {
		smmu_clk = clk_get(dev, "smmu_clk");
		if (IS_ERR(smmu_clk)) {
			if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
@@ -3572,12 +3596,13 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,

	ipa_ctx->pdev = ipa_dev;
	ipa_ctx->uc_pdev = ipa_dev;
	ipa_ctx->smmu_present = smmu_present;
	ipa_ctx->smmu_present = smmu_info.present;
	if (!ipa_ctx->smmu_present)
		ipa_ctx->smmu_s1_bypass = true;
	else
		ipa_ctx->smmu_s1_bypass = smmu_s1_bypass;
		ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
	ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
	ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
	ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
	ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
	ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
@@ -4103,7 +4128,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;

	smmu_disable_htw = of_property_read_bool(pdev->dev.of_node,
	smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
			"qcom,smmu-disable-htw");

	/* Get IPA HW Version */
@@ -4189,6 +4214,9 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
			ipa_drv_res->ipa_mem_base,
			ipa_drv_res->ipa_mem_size);

	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;

	/* Get IPA BAM address */
	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"bam-base");
@@ -4245,9 +4273,10 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,

static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_WLAN];
	struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
	int disable_htw = 1;
	int atomic_ctx = 1;
	int fast = 1;
	int bypass = 1;
	int ret;

@@ -4260,18 +4289,20 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	}
	cb->valid = true;

	if (smmu_disable_htw) {
	if (smmu_info.disable_htw) {
		ret = iommu_domain_set_attr(cb->iommu,
			DOMAIN_ATTR_COHERENT_HTW_DISABLE,
			&disable_htw);
		if (ret) {
			IPAERR("couldn't disable coherent HTW\n");
			cb->valid = false;
			return -EIO;
		}
	}

	if (smmu_s1_bypass) {
	if (smmu_info.s1_bypass) {
		if (iommu_domain_set_attr(cb->iommu,
			DOMAIN_ATTR_S1_BYPASS,
			&bypass)) {
@@ -4289,6 +4320,16 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
			return -EIO;
		}
		IPADBG("SMMU atomic set\n");
		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->iommu,
				DOMAIN_ATTR_FAST,
				&fast)) {
				IPAERR("couldn't set fast map\n");
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}

	ret = iommu_attach_device(cb->iommu, dev);
@@ -4298,31 +4339,47 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
		return ret;
	}

	if (!smmu_s1_bypass) {
	if (!smmu_info.s1_bypass) {
		IPAERR("map IPA region to WLAN_CB IOMMU\n");
		ret = iommu_map(cb->iommu, 0x680000, 0x680000,
			0x64000,
		ret = ipa_iommu_map(cb->iommu,
			rounddown(smmu_info.ipa_base, PAGE_SIZE),
			rounddown(smmu_info.ipa_base, PAGE_SIZE),
			roundup(smmu_info.ipa_size, PAGE_SIZE),
			IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
		if (ret) {
			IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n",
				ret);
			arm_iommu_detach_device(cb->dev);
			cb->valid = false;
			return ret;
		}
	}

	cb->valid = true;

	return 0;
}

static int ipa_smmu_uc_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_UC];
	struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
	int disable_htw = 1;
	int atomic_ctx = 1;
	int ret;
	int fast = 1;
	int bypass = 1;
	u32 iova_ap_mapping[2];

	IPADBG("sub pdev=%p\n", dev);
	IPADBG("UC CB PROBE sub pdev=%p\n", dev);

	ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
		iova_ap_mapping, 2);
	if (ret) {
		IPAERR("Fail to read UC start/size iova addresses\n");
		return ret;
	}
	cb->va_start = iova_ap_mapping[0];
	cb->va_size = iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;
	IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);

	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
@@ -4330,26 +4387,33 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
		return -EOPNOTSUPP;
	}

	IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);

	cb->dev = dev;
	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
			IPA_SMMU_UC_VA_START, IPA_SMMU_UC_VA_SIZE);
				cb->va_start, cb->va_size);
	if (IS_ERR(cb->mapping)) {
		IPADBG("Fail to create mapping\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	}
	IPADBG("SMMU mapping created\n");
	cb->valid = true;

	if (smmu_disable_htw) {
	IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
	if (smmu_info.disable_htw) {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
				 &disable_htw)) {
			IPAERR("couldn't disable coherent HTW\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
	}

	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
	if (smmu_s1_bypass) {
	if (smmu_info.s1_bypass) {
		if (iommu_domain_set_attr(cb->mapping->domain,
			DOMAIN_ATTR_S1_BYPASS,
			&bypass)) {
@@ -4359,6 +4423,27 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
			return -EIO;
		}
		IPADBG("SMMU S1 BYPASS\n");
	} else {
		if (iommu_domain_set_attr(cb->mapping->domain,
			DOMAIN_ATTR_ATOMIC,
			&atomic_ctx)) {
			IPAERR("couldn't set domain as atomic\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU atomic set\n");
		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_FAST,
				&fast)) {
				IPAERR("couldn't set fast map\n");
				arm_iommu_release_mapping(cb->mapping);
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}

	IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
@@ -4370,8 +4455,7 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
		return ret;
	}

	cb->valid = true;
	cb->next_addr = IPA_SMMU_UC_VA_END;
	cb->next_addr = cb->va_end;
	ipa_ctx->uc_pdev = dev;

	IPADBG("UC CB PROBE pdev=%p attached\n", dev);
@@ -4380,13 +4464,26 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)

static int ipa_smmu_ap_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = &smmu_cb[IPA_SMMU_CB_AP];
	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
	int result;
	int disable_htw = 1;
	int atomic_ctx = 1;
	int fast = 1;
	int bypass = 1;
	u32 iova_ap_mapping[2];

	IPADBG("sub pdev=%p\n", dev);
	IPADBG("AP CB probe: sub pdev=%p\n", dev);

	result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
		 iova_ap_mapping, 2);
	if (result) {
		IPAERR("Fail to read AP start/size iova addresses\n");
		return result;
	}
	cb->va_start = iova_ap_mapping[0];
	cb->va_size = iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;
	IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);

	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
@@ -4396,25 +4493,29 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)

	cb->dev = dev;
	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
			IPA_SMMU_AP_VA_START, IPA_SMMU_AP_VA_SIZE);
					       cb->va_start,
					       cb->va_size);
	if (IS_ERR(cb->mapping)) {
		IPADBG("Fail to create mapping\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	}
	IPADBG("SMMU mapping created\n");
	cb->valid = true;


	if (smmu_disable_htw) {
	if (smmu_info.disable_htw) {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_COHERENT_HTW_DISABLE,
				 &disable_htw)) {
			IPAERR("couldn't disable coherent HTW\n");
			arm_iommu_detach_device(cb->dev);
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU disable HTW\n");
	}

	if (smmu_s1_bypass) {
	if (smmu_info.s1_bypass) {
		if (iommu_domain_set_attr(cb->mapping->domain,
			DOMAIN_ATTR_S1_BYPASS,
			&bypass)) {
@@ -4435,25 +4536,41 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
		}
		IPADBG("SMMU atomic set\n");

		IPADBG("map IPA region to AP_CB IOMMU\n");
		result = iommu_map(cb->mapping->domain, 0x680000, 0x680000,
			0x64000,
			IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
		if (result) {
			IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
				result);
			return result;
		if (iommu_domain_set_attr(cb->mapping->domain,
			DOMAIN_ATTR_FAST,
			&fast)) {
			IPAERR("couldn't set fast map\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU fast map set\n");
	}

	result = arm_iommu_attach_device(cb->dev, cb->mapping);
	if (result) {
		IPAERR("couldn't attach to IOMMU ret=%d\n", result);
		cb->valid = false;
		return result;
	}

	cb->valid = true;
	smmu_present = true;
	if (!smmu_info.s1_bypass) {
		IPAERR("map IPA region to AP_CB IOMMU\n");
		result = ipa_iommu_map(cb->mapping->domain,
				rounddown(smmu_info.ipa_base, PAGE_SIZE),
				rounddown(smmu_info.ipa_base, PAGE_SIZE),
				roundup(smmu_info.ipa_size, PAGE_SIZE),
				IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
		if (result) {
			IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
				result);
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return result;
		}
	}

	smmu_info.present = true;

	if (!bus_scale_table)
		bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev);
@@ -4464,6 +4581,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
		IPAERR("ipa_init failed\n");
		arm_iommu_detach_device(cb->dev);
		arm_iommu_release_mapping(cb->mapping);
		cb->valid = false;
		return result;
	}

@@ -4506,8 +4624,13 @@ int ipa_plat_drv_probe(struct platform_device *pdev_p,
	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
		if (of_property_read_bool(pdev_p->dev.of_node,
		    "qcom,smmu-s1-bypass"))
			smmu_s1_bypass = true;
		arm_smmu = true;
			smmu_info.s1_bypass = true;
		if (of_property_read_bool(pdev_p->dev.of_node,
		    "qcom,smmu-fast-map"))
			smmu_info.fast_map = true;
		smmu_info.arm_smmu = true;
		pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
			smmu_info.s1_bypass, smmu_info.fast_map);
		result = of_platform_populate(pdev_p->dev.of_node,
				pdrv_match, NULL, &pdev_p->dev);
	} else if (of_property_read_bool(pdev_p->dev.of_node,
@@ -4589,6 +4712,39 @@ struct ipa_context *ipa_get_ctx(void)
	return ipa_ctx;
}

/**
 * ipa_iommu_map() - Map an IOVA range into an IPA-owned IOMMU domain,
 * rejecting mappings that would land inside a driver-reserved carve-out.
 * @domain: target IOMMU domain; must be the AP, WLAN, or uC IPA domain
 * @iova:   I/O virtual address to map
 * @paddr:  physical address to map to
 * @size:   size of the mapping in bytes
 * @prot:   IOMMU_* protection flags
 *
 * For the AP and uC domains, @iova must fall outside the context bank's
 * [va_start, va_end) range, which is reserved for arm_iommu/DMA-API
 * allocations; an overlap would corrupt that allocator's space. The WLAN
 * domain has no such carve-out (its mappings are one-time), so no range
 * check is performed there.
 *
 * Return: 0 on success, negative errno on failure. NOTE(review): an
 * overlap or unknown domain triggers BUG() — a kernel panic on
 * configurations without CONFIG_BUG handling it softly — so the -EFAULT
 * returns below are effectively unreachable on most builds; confirm this
 * hard-failure policy is intended.
 */
int ipa_iommu_map(struct iommu_domain *domain,
	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
{
	struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx();
	struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx();

	IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
	IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);

	/* make sure no overlapping */
	if (domain == ipa2_get_smmu_domain()) {
		/* AP domain: reject iova inside the AP CB's reserved range */
		if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
			BUG();
			return -EFAULT;
		}
	} else if (domain == ipa2_get_wlan_smmu_domain()) {
		/* wlan is one time map */
	} else if (domain == ipa2_get_uc_smmu_domain()) {
		/* uC domain: reject iova inside the uC CB's reserved range */
		if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
			BUG();
			return -EFAULT;
		}
	} else {
		IPAERR("Unexpected domain 0x%p\n", domain);
		BUG();
		return -EFAULT;
	}

	/* range is clear (or unchecked for WLAN); do the real mapping */
	return iommu_map(domain, iova, paddr, size, prot);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");
+11 −9
Original line number Diff line number Diff line
@@ -100,6 +100,7 @@ static int ipa2_smmu_map_peer_bam(unsigned long dev)
	phys_addr_t base;
	u32 size;
	struct iommu_domain *smmu_domain;
	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();

	if (!ipa_ctx->smmu_s1_bypass) {
		if (ipa_ctx->peer_bam_map_cnt == 0) {
@@ -109,19 +110,19 @@ static int ipa2_smmu_map_peer_bam(unsigned long dev)
			}
			smmu_domain = ipa2_get_smmu_domain();
			if (smmu_domain != NULL) {
				if (iommu_map(smmu_domain,
					IPA_SMMU_AP_VA_END,
				if (ipa_iommu_map(smmu_domain,
					cb->va_end,
					rounddown(base, PAGE_SIZE),
					roundup(size + base -
					rounddown(base, PAGE_SIZE), PAGE_SIZE),
					IOMMU_READ | IOMMU_WRITE |
					IOMMU_DEVICE)) {
					IPAERR("Fail to iommu_map\n");
					IPAERR("Fail to ipa_iommu_map\n");
					return -EINVAL;
				}
			}

			ipa_ctx->peer_bam_iova = IPA_SMMU_AP_VA_END;
			ipa_ctx->peer_bam_iova = cb->va_end;
			ipa_ctx->peer_bam_pa = base;
			ipa_ctx->peer_bam_map_size = size;
			ipa_ctx->peer_bam_dev = dev;
@@ -381,26 +382,26 @@ int ipa2_connect(const struct ipa_connect_params *in,
		base = ep->connect.data.iova;
		smmu_domain = ipa2_get_smmu_domain();
		if (smmu_domain != NULL) {
			if (iommu_map(smmu_domain,
			if (ipa_iommu_map(smmu_domain,
				rounddown(base, PAGE_SIZE),
				rounddown(base, PAGE_SIZE),
				roundup(ep->connect.data.size + base -
					rounddown(base, PAGE_SIZE), PAGE_SIZE),
				IOMMU_READ | IOMMU_WRITE)) {
				IPAERR("Fail to iommu_map data FIFO\n");
				IPAERR("Fail to ipa_iommu_map data FIFO\n");
				goto iommu_map_data_fail;
			}
		}
		ep->connect.desc.iova = ep->connect.desc.phys_base;
		base = ep->connect.desc.iova;
		if (smmu_domain != NULL) {
			if (iommu_map(smmu_domain,
			if (ipa_iommu_map(smmu_domain,
				rounddown(base, PAGE_SIZE),
				rounddown(base, PAGE_SIZE),
				roundup(ep->connect.desc.size + base -
					rounddown(base, PAGE_SIZE), PAGE_SIZE),
				IOMMU_READ | IOMMU_WRITE)) {
				IPAERR("Fail to iommu_map desc FIFO\n");
				IPAERR("Fail to ipa_iommu_map desc FIFO\n");
				goto iommu_map_desc_fail;
			}
		}
@@ -495,6 +496,7 @@ static int ipa2_smmu_unmap_peer_bam(unsigned long dev)
{
	size_t len;
	struct iommu_domain *smmu_domain;
	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();

	if (!ipa_ctx->smmu_s1_bypass) {
		WARN_ON(dev != ipa_ctx->peer_bam_dev);
@@ -507,7 +509,7 @@ static int ipa2_smmu_unmap_peer_bam(unsigned long dev)
			smmu_domain = ipa2_get_smmu_domain();
			if (smmu_domain != NULL) {
				if (iommu_unmap(smmu_domain,
					IPA_SMMU_AP_VA_END, len) != len) {
					cb->va_end, len) != len) {
					IPAERR("Fail to iommu_unmap\n");
					return -EINVAL;
				}
+8 −7
Original line number Diff line number Diff line
@@ -144,13 +144,6 @@
#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
#define IPA_MEM_PART(x_) (ipa_ctx->ctrl->mem_partition.x_)

#define IPA_SMMU_AP_VA_START 0x1000
#define IPA_SMMU_AP_VA_SIZE 0x40000000
#define IPA_SMMU_AP_VA_END (IPA_SMMU_AP_VA_START +  IPA_SMMU_AP_VA_SIZE)
#define IPA_SMMU_UC_VA_START 0x40000000
#define IPA_SMMU_UC_VA_SIZE 0x20000000
#define IPA_SMMU_UC_VA_END (IPA_SMMU_UC_VA_START +  IPA_SMMU_UC_VA_SIZE)

#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
@@ -183,6 +176,9 @@ struct ipa_smmu_cb_ctx {
	struct dma_iommu_mapping *mapping;
	struct iommu_domain *iommu;
	unsigned long next_addr;
	u32 va_start;
	u32 va_size;
	u32 va_end;
};

/**
@@ -1221,6 +1217,7 @@ struct ipa_context {
	struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX];
	void __iomem *mmio;
	u32 ipa_wrapper_base;
	u32 ipa_wrapper_size;
	struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
	struct ipa_hdr_tbl hdr_tbl;
	struct ipa_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
@@ -1981,9 +1978,11 @@ int ipa_uc_mhi_print_stats(char *dbg_buff, int size);
int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
u32 ipa_get_num_pipes(void);
u32 ipa_get_sys_yellow_wm(void);
struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void);
struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void);
struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void);
struct iommu_domain *ipa_get_uc_smmu_domain(void);
struct iommu_domain *ipa2_get_wlan_smmu_domain(void);
int ipa2_ap_suspend(struct device *dev);
int ipa2_ap_resume(struct device *dev);
struct iommu_domain *ipa2_get_smmu_domain(void);
@@ -1998,4 +1997,6 @@ int ipa2_restore_suspend_handler(void);
void ipa_sps_irq_control_all(bool enable);
void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
	phys_addr_t paddr, size_t size, int prot);
#endif /* _IPA_I_H_ */
+4 −4
Original line number Diff line number Diff line
@@ -484,7 +484,7 @@ static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
		return -EINVAL;
	}

	ret = iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
	ret = ipa_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
			true_len,
			device ? (prot | IOMMU_DEVICE) : prot);
	if (ret) {
@@ -525,7 +525,7 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
		phys = page_to_phys(sg_page(sg));
		len = PAGE_ALIGN(sg->offset + sg->length);

		ret = iommu_map(cb->mapping->domain, va, phys, len, prot);
		ret = ipa_iommu_map(cb->mapping->domain, va, phys, len, prot);
		if (ret) {
			IPAERR("iommu map failed for pa=%pa len=%zu\n",
					&phys, len);
@@ -577,7 +577,7 @@ static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
	}

	if (ipa_ctx->wdi_map_cnt == 0)
		cb->next_addr = IPA_SMMU_UC_VA_END;
		cb->next_addr = cb->va_end;

}

@@ -1574,7 +1574,7 @@ int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
	for (i = 0; i < num_buffers; i++) {
		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
			&info[i].pa, info[i].iova, info[i].size);
		info[i].result = iommu_map(cb->iommu,
		info[i].result = ipa_iommu_map(cb->iommu,
			rounddown(info[i].iova, PAGE_SIZE),
			rounddown(info[i].pa, PAGE_SIZE),
			roundup(info[i].size + info[i].pa -
Loading