Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7c074d6b authored by Perry Randise's avatar Perry Randise
Browse files

msm: ipa3: removed deprecated smmu calls



Previously deprecated SMMU calls have been removed. These changes
also required reorganizing a key data structure and its related logic.

Change-Id: I8f75e1ce52970a2d9f5bff97799d9e14c7ffd2d4
CRs-Fixed: 2398133
Signed-off-by: default avatarPerry Randise <prandise@codeaurora.org>
parent 9a64886b
Loading
Loading
Loading
Loading
+171 −277
Original line number Diff line number Diff line
@@ -130,8 +130,6 @@ struct ipa3_context *ipa3_ctx;
static struct {
	bool present[IPA_SMMU_CB_MAX];
	bool arm_smmu;
	bool fast_map;
	bool s1_bypass_arr[IPA_SMMU_CB_MAX];
	bool use_64_bit_dma_mask;
	u32 ipa_base;
	u32 ipa_size;
@@ -351,59 +349,34 @@ static void ipa3_active_clients_log_destroy(void)

static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];

struct iommu_domain *ipa3_get_smmu_domain(void)
struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type)
{
	if (smmu_cb[IPA_SMMU_CB_AP].valid)
		return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
	if (VALID_IPA_SMMU_CB_TYPE(cb_type) && smmu_cb[cb_type].valid)
		return smmu_cb[cb_type].iommu_domain;

	IPAERR("CB not valid\n");
	IPAERR("cb_type(%d) not valid\n", cb_type);

	return NULL;
}

struct iommu_domain *ipa3_get_uc_smmu_domain(void)
struct iommu_domain *ipa3_get_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_UC].valid)
		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_AP);
}

struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
struct iommu_domain *ipa3_get_uc_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;

	IPAERR("CB not valid\n");

	return NULL;
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_UC);
}

struct iommu_domain *ipa3_get_11ad_smmu_domain(void)
struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_11AD].valid)
		return smmu_cb[IPA_SMMU_CB_11AD].iommu;

	IPAERR("CB not valid\n");

	return NULL;
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_WLAN);
}

struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type)
struct iommu_domain *ipa3_get_11ad_smmu_domain(void)
{

	if ((cb_type == IPA_SMMU_CB_WLAN || cb_type == IPA_SMMU_CB_11AD)
		&& smmu_cb[cb_type].valid)
		return smmu_cb[cb_type].iommu;

	if (smmu_cb[cb_type].valid)
		return smmu_cb[cb_type].mapping->domain;

	IPAERR("CB#%d not valid\n", cb_type);

	return NULL;
	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_11AD);
}

struct device *ipa3_get_dma_dev(void)
@@ -6293,77 +6266,66 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
	int atomic_ctx = 1;
	int fast = 1;
	int bypass = 1;
	int ret;
	int fast = 0;
	int bypass = 0;
	u32 add_map_size;
	const u32 *add_map;
	int i;
	u32 iova_ap_mapping[2];

	IPADBG("sub pdev=%pK\n", dev);
	IPADBG("WLAN CB PROBE dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_WLAN]) {
		IPAERR("WLAN SMMU is disabled\n");
		return 0;
	}

	cb->dev = dev;
	cb->iommu = iommu_domain_alloc(dev->bus);
	if (!cb->iommu) {
		IPAERR("could not alloc iommu domain\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	IPADBG("WLAN CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);

	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		return -EINVAL;
	}
	cb->valid = true;

	if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") ||
		ipa3_ctx->ipa_config_is_mhi) {
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
	IPADBG("WLAN CB PROBE mapping retrieved\n");

		if (iommu_domain_set_attr(cb->iommu,
					DOMAIN_ATTR_S1_BYPASS,
					&bypass)) {
			IPAERR("couldn't set bypass\n");
			cb->valid = false;
			return -EIO;
		}
		IPADBG("WLAN SMMU S1 BYPASS\n");
	} else {
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
	cb->dev   = dev;
	cb->valid = true;

		if (iommu_domain_set_attr(cb->iommu,
					DOMAIN_ATTR_ATOMIC,
					&atomic_ctx)) {
			IPAERR("couldn't disable coherent HTW\n");
			cb->valid = false;
			return -EIO;
	cb->va_start = cb->va_end  = cb->va_size = 0;
	if (of_property_read_u32_array(
			dev->of_node, "qcom,iommu-dma-addr-pool",
			iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size  = iova_ap_mapping[1];
		cb->va_end   = cb->va_start + cb->va_size;
	}
		IPADBG(" WLAN SMMU ATTR ATOMIC\n");

		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->iommu,
						DOMAIN_ATTR_FAST,
						&fast)) {
				IPAERR("couldn't set fast map\n");
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}
	IPADBG("WLAN CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		   dev, cb->va_start, cb->va_size);

	pr_info("IPA smmu_info.s1_bypass_arr[WLAN]=%d smmu_info.fast_map=%d\n",
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN], smmu_info.fast_map);
	/*
	 * Prior to these calls to iommu_domain_get_attr(), these
	 * attributes were set in this function relative to dtsi values
	 * defined for this driver.  In other words, if corresponding ipa
	 * driver owned values were found in the dtsi, they were read and
	 * set here.
	 *
	 * In this new world, the developer uses iommu-owned dtsi
	 * settings to set them there.  The new logic below simply
	 * checks to see if they've been set in dtsi.  If so, the logic
	 * further below acts accordingly...
	 */
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);

	IPADBG(
	  "WLAN CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
	  dev, bypass, fast);

	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = (bypass != 0);

	ret = iommu_attach_device(cb->iommu, dev);
	if (ret) {
		IPAERR("could not attach device ret=%d\n", ret);
		cb->valid = false;
		return ret;
	}
	/* MAP ipa-uc ram */
	add_map = of_get_property(dev->of_node,
		"qcom,additional-mapping", &add_map_size);
@@ -6388,41 +6350,29 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
				iova_p, pa_p, size_p);
			IPADBG("mapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			ipa3_iommu_map(cb->iommu,
			ipa3_iommu_map(cb->iommu_domain,
				iova_p, pa_p, size_p,
				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		}
	}

	return 0;
}

static int ipa_smmu_uc_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
	int atomic_ctx = 1;
	int bypass = 1;
	int fast = 1;
	int ret;
	int bypass = 0;
	int fast = 0;
	u32 iova_ap_mapping[2];

	IPADBG("UC CB PROBE sub pdev=%pK\n", dev);
	IPADBG("UC CB PROBE dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_UC]) {
		IPAERR("UC SMMU is disabled\n");
		return 0;
	}

	ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
			iova_ap_mapping, 2);
	if (ret) {
		IPAERR("Fail to read UC start/size iova addresses\n");
		return ret;
	}
	cb->va_start = iova_ap_mapping[0];
	cb->va_size = iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;
	IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);

	if (smmu_info.use_64_bit_dma_mask) {
		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
			dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
@@ -6436,75 +6386,52 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
			return -EOPNOTSUPP;
		}
	}
	IPADBG("UC CB PROBE=%pK create IOMMU mapping\n", dev);

	cb->dev = dev;
	cb->mapping = __depr_arm_iommu_create_mapping(dev->bus,
			cb->va_start, cb->va_size);
	if (IS_ERR_OR_NULL(cb->mapping)) {
		IPADBG("Fail to create mapping\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	IPADBG("UC CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);

	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		return -EINVAL;
	}
	IPADBG("SMMU mapping created\n");
	cb->valid = true;

	IPADBG("UC CB PROBE sub pdev=%pK set attribute\n", dev);
	IPADBG("UC CB PROBE mapping retrieved\n");

	if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") ||
		ipa3_ctx->ipa_config_is_mhi) {
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = true;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = true;
	cb->dev   = dev;
	cb->valid = true;

		if (iommu_domain_set_attr(cb->mapping->domain,
			DOMAIN_ATTR_S1_BYPASS,
			&bypass)) {
			IPAERR("couldn't set bypass\n");
			__depr_arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("UC SMMU S1 BYPASS\n");
	} else {
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = false;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = false;

		if (iommu_domain_set_attr(cb->mapping->domain,
			DOMAIN_ATTR_ATOMIC,
			&atomic_ctx)) {
			IPAERR("couldn't set domain as atomic\n");
			__depr_arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
	cb->va_start = cb->va_end  = cb->va_size = 0;
	if (of_property_read_u32_array(
			dev->of_node, "qcom,iommu-dma-addr-pool",
			iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size  = iova_ap_mapping[1];
		cb->va_end   = cb->va_start + cb->va_size;
	}
		IPADBG("SMMU atomic set\n");

		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_FAST,
				&fast)) {
				IPAERR("couldn't set fast map\n");
				__depr_arm_iommu_release_mapping(cb->mapping);
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}
	IPADBG("UC CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		   dev, cb->va_start, cb->va_size);

	pr_info("IPA smmu_info.s1_bypass_arr[UC]=%d smmu_info.fast_map=%d\n",
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC], smmu_info.fast_map);
	/*
	 * Prior to these calls to iommu_domain_get_attr(), these
	 * attributes were set in this function relative to dtsi values
	 * defined for this driver.  In other words, if corresponding ipa
	 * driver owned values were found in the dtsi, they were read and
	 * set here.
	 *
	 * In this new world, the developer uses iommu-owned dtsi
	 * settings to set them there.  The new logic below simply
	 * checks to see if they've been set in dtsi.  If so, the logic
	 * further below acts accordingly...
	 */
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);

	IPADBG("UC CB PROBE sub pdev=%pK attaching IOMMU device\n", dev);
	ret = __depr_arm_iommu_attach_device(cb->dev, cb->mapping);
	if (ret) {
		IPAERR("could not attach device ret=%d\n", ret);
		__depr_arm_iommu_release_mapping(cb->mapping);
		cb->valid = false;
		return ret;
	}
	IPADBG("UC CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
		   dev, bypass, fast);

	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = (bypass != 0);

	cb->next_addr = cb->va_end;
	ipa3_ctx->uc_pdev = dev;

	return 0;
@@ -6513,11 +6440,8 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
static int ipa_smmu_ap_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
	int result;
	int atomic_ctx = 1;
	int fast = 1;
	int bypass = 1;
	u32 iova_ap_mapping[2];
	int fast = 0;
	int bypass = 0;
	u32 add_map_size;
	const u32 *add_map;
	void *smem_addr;
@@ -6530,25 +6454,15 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
	u32 size_p;
	phys_addr_t iova;
	phys_addr_t pa;
	u32 iova_ap_mapping[2];

	IPADBG("AP CB probe: sub pdev=%pK\n", dev);
	IPADBG("AP CB PROBE dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_AP]) {
		IPAERR("AP SMMU is disabled");
		return 0;
	}

	result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
		iova_ap_mapping, 2);
	if (result) {
		IPAERR("Fail to read AP start/size iova addresses\n");
		return result;
	}
	cb->va_start = iova_ap_mapping[0];
	cb->va_size = iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;
	IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);

	if (smmu_info.use_64_bit_dma_mask) {
		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
			dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
@@ -6563,65 +6477,50 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
		}
	}

	cb->dev = dev;
	cb->mapping = __depr_arm_iommu_create_mapping(dev->bus,
					cb->va_start, cb->va_size);
	if (IS_ERR_OR_NULL(cb->mapping)) {
		IPADBG("Fail to create mapping\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	IPADBG("AP CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);

	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		return -EINVAL;
	}
	IPADBG("SMMU mapping created\n");

	IPADBG("AP CB PROBE mapping retrieved\n");

	cb->dev   = dev;
	cb->valid = true;

	if (of_property_read_bool(dev->of_node,
		"qcom,smmu-s1-bypass") || ipa3_ctx->ipa_config_is_mhi) {
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = true;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = true;
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_S1_BYPASS,
				&bypass)) {
			IPAERR("couldn't set bypass\n");
			__depr_arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("AP/USB SMMU S1 BYPASS\n");
	} else {
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = false;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = false;
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_ATOMIC,
				&atomic_ctx)) {
			IPAERR("couldn't set domain as atomic\n");
			__depr_arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
	cb->va_start = cb->va_end  = cb->va_size = 0;
	if (of_property_read_u32_array(
			dev->of_node, "qcom,iommu-dma-addr-pool",
			iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size  = iova_ap_mapping[1];
		cb->va_end   = cb->va_start + cb->va_size;
	}
		IPADBG("AP/USB SMMU atomic set\n");

		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_FAST,
				&fast)) {
				IPAERR("couldn't set fast map\n");
				__depr_arm_iommu_release_mapping(cb->mapping);
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}
	IPADBG("AP CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		   dev, cb->va_start, cb->va_size);

	pr_info("IPA smmu_info.s1_bypass_arr[AP]=%d smmu_info.fast_map=%d\n",
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP], smmu_info.fast_map);
	/*
	 * Prior to these calls to iommu_domain_get_attr(), these
	 * attributes were set in this function relative to dtsi values
	 * defined for this driver.  In other words, if corresponding ipa
	 * driver owned values were found in the dtsi, they were read and
	 * set here.
	 *
	 * In this new world, the developer uses iommu-owned dtsi
	 * settings to set them there.  The new logic below simply
	 * checks to see if they've been set in dtsi.  If so, the logic
	 * further below acts accordingly...
	 */
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);

	result = __depr_arm_iommu_attach_device(cb->dev, cb->mapping);
	if (result) {
		IPAERR("couldn't attach to IOMMU ret=%d\n", result);
		cb->valid = false;
		return result;
	}
	IPADBG("AP CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
		   dev, bypass, fast);

	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = (bypass != 0);

	add_map = of_get_property(dev->of_node,
		"qcom,additional-mapping", &add_map_size);
@@ -6646,7 +6545,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
				iova_p, pa_p, size_p);
			IPADBG("mapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			ipa3_iommu_map(cb->mapping->domain,
			ipa3_iommu_map(cb->iommu_domain,
				iova_p, pa_p, size_p,
				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		}
@@ -6690,11 +6589,12 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
				iova_p, pa_p, size_p);
			IPADBG("mapping 0x%lx to 0x%pa size %d\n",
				iova_p, &pa_p, size_p);
			ipa3_iommu_map(cb->mapping->domain,
			ipa3_iommu_map(cb->iommu_domain,
				iova_p, pa_p, size_p,
				IOMMU_READ | IOMMU_WRITE);

	smmu_info.present[IPA_SMMU_CB_AP] = true;

	ipa3_ctx->pdev = dev;
	cb->next_addr = cb->va_end;

@@ -6703,50 +6603,51 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)

static int ipa_smmu_11ad_cb_probe(struct device *dev)
{
	int ret;
	int s1_bypass = 0;
	int bypass = 0;
	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
	u32 iova_ap_mapping[2];

	IPADBG("11ad CB probe: sub dev=%pK\n", dev);
	IPADBG("11AD CB probe: dev=%pK\n", dev);

	if (!smmu_info.present[IPA_SMMU_CB_11AD]) {
		IPAERR("11ad SMMU is disabled");
		IPAERR("11AD SMMU is disabled");
		return 0;
	}

	cb->dev = dev;
	cb->iommu = iommu_get_domain_for_dev(dev);
	if (!cb->iommu) {
	cb->iommu_domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
		IPAERR("could not get iommu domain\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
		return -EINVAL;
	}

	cb->dev   = dev;
	cb->valid = true;

	ret = iommu_domain_get_attr(
		cb->iommu, DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
	if (ret) {
		IPAERR("can't get DOMAIN_ATTR_S1_BYPASS\n");
		return ret;
	cb->va_start = cb->va_end  = cb->va_size = 0;
	if (of_property_read_u32_array(
			dev->of_node, "qcom,iommu-dma-addr-pool",
			iova_ap_mapping, 2) == 0) {
		cb->va_start = iova_ap_mapping[0];
		cb->va_size  = iova_ap_mapping[1];
		cb->va_end   = cb->va_start + cb->va_size;
	}

	if (s1_bypass) {
		IPADBG("11AD SMMU S1 BYPASS\n");
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_11AD] = true;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] = true;
	} else {
		IPADBG("11AD SMMU S1 enabled\n");
		smmu_info.s1_bypass_arr[IPA_SMMU_CB_11AD] = false;
		ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] = false;
	}
	IPADBG("11AD CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
		   dev, cb->va_start, cb->va_size);

	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);

	IPADBG("11AD CB PROBE dev=%pK DOMAIN ATTRS bypass=%d\n",
		   dev, bypass);

	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] = (bypass != 0);

	if (of_property_read_bool(dev->of_node, "qcom,shared-cb")) {
		IPADBG("using shared CB\n");
		IPADBG("11AD using shared CB\n");
		cb->shared = true;
	}

	return 0;
	IPADBG("exit\n");
}

static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
@@ -6907,17 +6808,10 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p,
	}

	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,smmu-fast-map"))
			smmu_info.fast_map = true;
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,use-64-bit-dma-mask"))
			smmu_info.use_64_bit_dma_mask = true;
		smmu_info.arm_smmu = true;
	} else if (of_property_read_bool(pdev_p->dev.of_node,
				"qcom,msm-smmu")) {
		IPAERR("Legacy IOMMU not supported\n");
		result = -EOPNOTSUPP;
	} else {
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,use-64-bit-dma-mask")) {
+2 −2
Original line number Diff line number Diff line
@@ -525,7 +525,7 @@ int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map,
	struct iommu_domain *smmu_domain;
	int res;

	if (cb_type >= IPA_SMMU_CB_MAX) {
	if (!VALID_IPA_SMMU_CB_TYPE(cb_type)) {
		IPAERR("invalid cb_type\n");
		return -EINVAL;
	}
@@ -572,7 +572,7 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
	int i;
	struct page *page;

	if (cb_type >= IPA_SMMU_CB_MAX) {
	if (!VALID_IPA_SMMU_CB_TYPE(cb_type)) {
		IPAERR("invalid cb_type\n");
		return -EINVAL;
	}
+4 −2
Original line number Diff line number Diff line
@@ -445,8 +445,7 @@ struct ipa3_client_names {
struct ipa_smmu_cb_ctx {
	bool valid;
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	struct iommu_domain *iommu;
	struct iommu_domain *iommu_domain;
	unsigned long next_addr;
	u32 va_start;
	u32 va_size;
@@ -1433,6 +1432,9 @@ enum ipa_smmu_cb_type {
	IPA_SMMU_CB_MAX
};

#define VALID_IPA_SMMU_CB_TYPE(t) \
	((t) >= IPA_SMMU_CB_AP && (t) < IPA_SMMU_CB_MAX)

/**
 * struct ipa3_char_device_context - IPA character device
 * @class: pointer to the struct class
+2 −2
Original line number Diff line number Diff line
@@ -725,7 +725,7 @@ static void imp_mhi_shutdown(void)
				imp_ctx->dev_info.chdb_base, PAGE_SIZE,
				&iova_p, &pa_p, &size_p);

			iommu_unmap(cb->mapping->domain, iova_p, size_p);
			iommu_unmap(cb->iommu_domain, iova_p, size_p);
		}
	}
	if (!imp_ctx->in_lpm &&
@@ -836,7 +836,7 @@ static int imp_mhi_probe_cb(struct mhi_device *mhi_dev,
			imp_ctx->dev_info.chdb_base, PAGE_SIZE,
			&iova_p, &pa_p, &size_p);

		ret = ipa3_iommu_map(cb->mapping->domain, iova_p, pa_p, size_p,
		ret = ipa3_iommu_map(cb->iommu_domain, iova_p, pa_p, size_p,
			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		if (ret)
			goto fail;
+10 −10
Original line number Diff line number Diff line
@@ -515,7 +515,7 @@ static int ipa_create_ap_smmu_mapping_pa(phys_addr_t pa, size_t len,
	if (len > PAGE_SIZE)
		va = roundup(cb->next_addr, len);

	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
	ret = ipa3_iommu_map(cb->iommu_domain, va, rounddown(pa, PAGE_SIZE),
			true_len,
			device ? (prot | IOMMU_MMIO) : prot);
	if (ret) {
@@ -544,7 +544,7 @@ static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
		return -EINVAL;
	}

	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
	ret = ipa3_iommu_map(cb->iommu_domain, va, rounddown(pa, PAGE_SIZE),
			true_len,
			device ? (prot | IOMMU_MMIO) : prot);
	if (ret) {
@@ -596,7 +596,7 @@ static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,
		phys = sg->dma_address;
		len = PAGE_ALIGN(sg->offset + sg->length);

		ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
		ret = ipa3_iommu_map(cb->iommu_domain, va, phys, len, prot);
		if (ret) {
			IPAERR("iommu map failed for pa=%pa len=%zu\n",
					&phys, len);
@@ -613,7 +613,7 @@ static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,

bad_mapping:
	for_each_sg(sgt->sgl, sg, count, i)
		iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
		iommu_unmap(cb->iommu_domain, sg_dma_address(sg),
				sg_dma_len(sg));
	return -EINVAL;
}
@@ -647,7 +647,7 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
		phys = sg->dma_address;
		len = PAGE_ALIGN(sg->offset + sg->length);

		ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
		ret = ipa3_iommu_map(cb->iommu_domain, va, phys, len, prot);
		if (ret) {
			IPAERR("iommu map failed for pa=%pa len=%zu\n",
					&phys, len);
@@ -664,7 +664,7 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,

bad_mapping:
	for_each_sg(sgt->sgl, sg, count, i)
		iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
		iommu_unmap(cb->iommu_domain, sg_dma_address(sg),
				sg_dma_len(sg));
	return -EINVAL;
}
@@ -692,7 +692,7 @@ static void ipa_release_ap_smmu_mappings(enum ipa_client_type client)
	for (i = start; i <= end; i++) {
		if (wdi_res[i].valid) {
			for (j = 0; j < wdi_res[i].nents; j++) {
				iommu_unmap(cb->mapping->domain,
				iommu_unmap(cb->iommu_domain,
					wdi_res[i].res[j].iova,
					wdi_res[i].res[j].size);
				ipa3_ctx->wdi_map_cnt--;
@@ -728,7 +728,7 @@ static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
	for (i = start; i <= end; i++) {
		if (wdi_res[i].valid) {
			for (j = 0; j < wdi_res[i].nents; j++) {
				iommu_unmap(cb->mapping->domain,
				iommu_unmap(cb->iommu_domain,
					wdi_res[i].res[j].iova,
					wdi_res[i].res[j].size);
				ipa3_ctx->wdi_map_cnt--;
@@ -2893,7 +2893,7 @@ int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
	for (i = 0; i < num_buffers; i++) {
		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
			&info[i].pa, info[i].iova, info[i].size);
		info[i].result = ipa3_iommu_map(cb->iommu,
		info[i].result = ipa3_iommu_map(cb->iommu_domain,
			rounddown(info[i].iova, PAGE_SIZE),
			rounddown(info[i].pa, PAGE_SIZE),
			roundup(info[i].size + info[i].pa -
@@ -2923,7 +2923,7 @@ int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
	for (i = 0; i < num_buffers; i++) {
		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
			&info[i].pa, info[i].iova, info[i].size);
		info[i].result = iommu_unmap(cb->iommu,
		info[i].result = iommu_unmap(cb->iommu_domain,
			rounddown(info[i].iova, PAGE_SIZE),
			roundup(info[i].size + info[i].pa -
				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));