
Commit c189c144 authored by Sujeev Dias, committed by Gerrit - the friendly Code Review server

mhi: cntrl: qcom: remove deprecated IOMMU APIs



IOMMU mapping APIs have been deprecated in favor of device
tree entries. Update MHI functions as required.

CRs-Fixed: 2398500
Change-Id: I0a7c0c9a83f1640430452ef07daf4f938b93b97d
Acked-by: Bhaumik Vasav Bhatt <bbhatt@qti.qualcomm.com>
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
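
The device-tree entries that replace the removed qcom,smmu-cfg / mhi,use-bb / qcom,addr-win properties are sketched below. This is a minimal illustration rather than a binding taken from the commit: the node names, labels, and example base/size values are hypothetical, while the property names (qcom,iommu-group, qcom,iommu-dma, qcom,iommu-dma-addr-pool) and the recognized "bypass" value come from the new code in this diff.

	mhi: qcom,mhi@0 {
		/* phandle to the node carrying the IOMMU DMA settings */
		qcom,iommu-group = <&mhi_iommu_group>;
	};

	mhi_iommu_group: qcom,iommu_group {
		/*
		 * uncomment to disable S1 translation; when the property
		 * is absent, S1 is assumed enabled in default mode
		 */
		/* qcom,iommu-dma = "bypass"; */

		/*
		 * S1 IOVA pool as <base size>, two u32 cells; read only
		 * when S1 translation is enabled
		 */
		qcom,iommu-dma-addr-pool = <0x20000000 0x3e000000>;
	};

Note the changed address semantics: the new pool property is a <base size> pair of u32 cells, which is why the controller now sets iova_stop to addr_win[0] + addr_win[1], whereas the removed qcom,addr-win property was a <start stop> pair of u64s.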
parent 3c5d2305
+1 −131
@@ -26,7 +26,6 @@ struct arch_info {
 	u32 bus_client;
 	struct msm_pcie_register_event pcie_reg_event;
 	struct pci_saved_state *pcie_state;
-	struct dma_iommu_mapping *mapping;
 	async_cookie_t cookie;
 	void *boot_ipc_log;
 	struct mhi_device *boot_dev;
@@ -198,8 +197,8 @@ void mhi_arch_esoc_ops_power_off(void *priv, bool mdm_state)
 	/* wait for boot monitor to exit */
 	async_synchronize_cookie(arch_info->cookie + 1);
 
-	mhi_arch_iommu_deinit(mhi_cntrl);
 	mhi_arch_pcie_deinit(mhi_cntrl);
+	mhi_cntrl->dev = NULL;
 	mhi_dev->powered_on = false;
 }

@@ -506,135 +505,6 @@ void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
 	mhi_arch_set_bus_request(mhi_cntrl, 0);
 }
 
-static struct dma_iommu_mapping *mhi_arch_create_iommu_mapping(
-					struct mhi_controller *mhi_cntrl)
-{
-	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
-	dma_addr_t base;
-	size_t size;
-
-	/*
-	 * If S1_BYPASS enabled then iommu space is not used, however framework
-	 * still require clients to create a mapping space before attaching. So
-	 * set to smallest size required by iommu framework.
-	 */
-	if (mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS) {
-		base = 0;
-		size = PAGE_SIZE;
-	} else {
-		base = mhi_dev->iova_start;
-		size = (mhi_dev->iova_stop - base) + 1;
-	}
-
-	MHI_LOG("Create iommu mapping of base:%pad size:%zu\n",
-		&base, size);
-	return __depr_arm_iommu_create_mapping(&pci_bus_type, base, size);
-}
-
-int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
-{
-	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
-	struct arch_info *arch_info = mhi_dev->arch_info;
-	u32 smmu_config = mhi_dev->smmu_cfg;
-	struct dma_iommu_mapping *mapping = NULL;
-	int ret;
-
-	if (smmu_config) {
-		mapping = mhi_arch_create_iommu_mapping(mhi_cntrl);
-		if (IS_ERR(mapping)) {
-			MHI_ERR("Failed to create iommu mapping\n");
-			return PTR_ERR(mapping);
-		}
-	}
-
-	if (smmu_config & MHI_SMMU_S1_BYPASS) {
-		int s1_bypass = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					    DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
-		if (ret) {
-			MHI_ERR("Failed to set attribute S1_BYPASS\n");
-			goto release_mapping;
-		}
-	}
-
-	if (smmu_config & MHI_SMMU_FAST) {
-		int fast_map = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST,
-					    &fast_map);
-		if (ret) {
-			MHI_ERR("Failed to set attribute FAST_MAP\n");
-			goto release_mapping;
-		}
-	}
-
-	if (smmu_config & MHI_SMMU_ATOMIC) {
-		int atomic = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC,
-					    &atomic);
-		if (ret) {
-			MHI_ERR("Failed to set attribute ATOMIC\n");
-			goto release_mapping;
-		}
-	}
-
-	if (smmu_config & MHI_SMMU_FORCE_COHERENT) {
-		int force_coherent = 1;
-
-		ret = iommu_domain_set_attr(mapping->domain,
-					DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
-					&force_coherent);
-		if (ret) {
-			MHI_ERR("Failed to set attribute FORCE_COHERENT\n");
-			goto release_mapping;
-		}
-	}
-
-	if (smmu_config) {
-		ret = __depr_arm_iommu_attach_device(&mhi_dev->pci_dev->dev,
-						     mapping);
-		if (ret) {
-			MHI_ERR("Error attach device, ret:%d\n", ret);
-			goto release_mapping;
-		}
-		arch_info->mapping = mapping;
-	}
-
-	mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
-
-	ret = dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
-	if (ret) {
-		MHI_ERR("Error setting dma mask, ret:%d\n", ret);
-		goto release_device;
-	}
-
-	return 0;
-
-release_device:
-	__depr_arm_iommu_detach_device(mhi_cntrl->dev);
-
-release_mapping:
-	__depr_arm_iommu_release_mapping(mapping);
-
-	return ret;
-}
-
-void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
-{
-	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
-	struct arch_info *arch_info = mhi_dev->arch_info;
-	struct dma_iommu_mapping *mapping = arch_info->mapping;
-
-	if (mapping) {
-		__depr_arm_iommu_detach_device(mhi_cntrl->dev);
-		__depr_arm_iommu_release_mapping(mapping);
-	}
-	arch_info->mapping = NULL;
-	mhi_cntrl->dev = NULL;
-}
-
 static int mhi_arch_drv_suspend(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+43 −46
@@ -642,8 +642,9 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
 	struct mhi_dev *mhi_dev;
 	struct device_node *of_node = pci_dev->dev.of_node;
 	const struct firmware_info *firmware_info;
-	bool use_bb;
-	u64 addr_win[2];
+	bool use_s1;
+	u32 addr_win[2];
+	const char *iommu_dma_type;
 	int ret, i;
 
 	if (!of_node)
@@ -659,48 +660,45 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
 	mhi_cntrl->dev_id = pci_dev->device;
 	mhi_cntrl->bus = pci_dev->bus->number;
 	mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
+	mhi_cntrl->of_node = of_node;
 
-	ret = of_property_read_u32(of_node, "qcom,smmu-cfg",
-				   &mhi_dev->smmu_cfg);
-	if (ret)
-		goto error_register;
+	mhi_cntrl->iova_start = memblock_start_of_DRAM();
+	mhi_cntrl->iova_stop = memblock_end_of_DRAM();
 
-	use_bb = of_property_read_bool(of_node, "mhi,use-bb");
+	of_node = of_parse_phandle(mhi_cntrl->of_node, "qcom,iommu-group", 0);
+	if (of_node) {
+		use_s1 = true;
 
-	/*
-	 * if s1 translation enabled or using bounce buffer pull iova addr
-	 * from dt
-	 */
-	if (use_bb || (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
-		       !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))) {
-		ret = of_property_count_elems_of_size(of_node, "qcom,addr-win",
-						      sizeof(addr_win));
-		if (ret != 1)
-			goto error_register;
-		ret = of_property_read_u64_array(of_node, "qcom,addr-win",
-						 addr_win, 2);
-		if (ret)
-			goto error_register;
-	} else {
-		addr_win[0] = memblock_start_of_DRAM();
-		addr_win[1] = memblock_end_of_DRAM();
-	}
+		/*
+		 * s1 translation can be in bypass or fastmap mode
+		 * if "qcom,iommu-dma" property is missing, we assume s1 is
+		 * enabled and in default (no fastmap/atomic) mode
+		 */
+		ret = of_property_read_string(of_node, "qcom,iommu-dma",
+					      &iommu_dma_type);
+		if (!ret && !strcmp("bypass", iommu_dma_type))
+			use_s1 = false;
 
-	mhi_dev->iova_start = addr_win[0];
-	mhi_dev->iova_stop = addr_win[1];
-
-	/*
-	 * If S1 is enabled, set MHI_CTRL start address to 0 so we can use low
-	 * level mapping api to map buffers outside of smmu domain
-	 */
-	if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
-	    !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))
-		mhi_cntrl->iova_start = 0;
-	else
-		mhi_cntrl->iova_start = addr_win[0];
-
-	mhi_cntrl->iova_stop = mhi_dev->iova_stop;
-	mhi_cntrl->of_node = of_node;
+		/*
+		 * if s1 translation enabled pull iova addr from dt using
+		 * iommu-dma-addr-pool property specified addresses
+		 */
+		if (use_s1) {
+			ret = of_property_read_u32_array(of_node,
+						"qcom,iommu-dma-addr-pool",
+						addr_win, 2);
+			if (ret)
+				return ERR_PTR(-EINVAL);
+
+			/*
+			 * If S1 is enabled, set MHI_CTRL start address to 0
+			 * so we can use low level mapping api to map buffers
+			 * outside of smmu domain
+			 */
+			mhi_cntrl->iova_start = 0;
+			mhi_cntrl->iova_stop = addr_win[0] + addr_win[1];
+		}
+	}
 
 	mhi_dev->pci_dev = pci_dev;
 	spin_lock_init(&mhi_dev->lpm_lock);
@@ -767,13 +765,15 @@ int mhi_pci_probe(struct pci_dev *pci_dev,
 	if (ret)
 		return ret;
 
-	ret = mhi_arch_iommu_init(mhi_cntrl);
+	mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
+
+	ret = dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
 	if (ret)
-		goto error_iommu_init;
+		goto error_pci_probe;
 
 	ret = mhi_init_pci_dev(mhi_cntrl);
 	if (ret)
-		goto error_init_pci;
+		goto error_pci_probe;
 
 	/* start power up sequence */
 	if (!debug_mode) {
@@ -791,10 +791,7 @@ int mhi_pci_probe(struct pci_dev *pci_dev,
 error_power_up:
 	mhi_deinit_pci_dev(mhi_cntrl);
 
-error_init_pci:
-	mhi_arch_iommu_deinit(mhi_cntrl);
-
-error_iommu_init:
+error_pci_probe:
 	mhi_arch_pcie_deinit(mhi_cntrl);
 
 	return ret;
+0 −16
@@ -33,7 +33,6 @@ enum mhi_suspend_mode {
 struct mhi_dev {
 	struct pci_dev *pci_dev;
 	bool drv_supported;
-	u32 smmu_cfg;
 	int resn;
 	void *arch_info;
 	bool powered_on;
@@ -58,26 +57,11 @@ int mhi_pci_probe(struct pci_dev *pci_dev,
 int mhi_arch_power_up(struct mhi_controller *mhi_cntrl);
 int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl);
 void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl);
-int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl);
-void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl);
 int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl);
 int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl);
 
 #else
 
-static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
-{
-	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
-
-	mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
-
-	return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
-}
-
-static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
-{
-}
-
 static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 {
 	return 0;