Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee389171 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: Code update to support wdi3.0 with smmu-s1"

parents 3e787eb2 fda45d74
Loading
Loading
Loading
Loading
+21 −3
Original line number Original line Diff line number Diff line
@@ -512,6 +512,9 @@ static int ipa_create_ap_smmu_mapping_pa(phys_addr_t pa, size_t len,
		return -EINVAL;
		return -EINVAL;
	}
	}


	if (len > PAGE_SIZE)
		va = roundup(cb->next_addr, len);

	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
			true_len,
			true_len,
			device ? (prot | IOMMU_MMIO) : prot);
			device ? (prot | IOMMU_MMIO) : prot);
@@ -565,7 +568,7 @@ static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,
	struct scatterlist *sg;
	struct scatterlist *sg;
	unsigned long start_iova = va;
	unsigned long start_iova = va;
	phys_addr_t phys;
	phys_addr_t phys;
	size_t len;
	size_t len = 0;
	int count = 0;
	int count = 0;


	if (!cb->valid) {
	if (!cb->valid) {
@@ -577,6 +580,17 @@ static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,
		return -EINVAL;
		return -EINVAL;
	}
	}


	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* directly get sg_tbl PA from wlan-driver */
		len += PAGE_ALIGN(sg->offset + sg->length);
	}

	if (len > PAGE_SIZE) {
		va = roundup(cb->next_addr,
				roundup_pow_of_two(len));
		start_iova = va;
	}

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* directly get sg_tbl PA from wlan-driver */
		/* directly get sg_tbl PA from wlan-driver */
		phys = sg->dma_address;
		phys = sg->dma_address;
@@ -662,10 +676,14 @@ static void ipa_release_ap_smmu_mappings(enum ipa_client_type client)


	if (IPA_CLIENT_IS_CONS(client)) {
	if (IPA_CLIENT_IS_CONS(client)) {
		start = IPA_WDI_TX_RING_RES;
		start = IPA_WDI_TX_RING_RES;
		if (ipa3_ctx->ipa_wdi3_over_gsi)
			end = IPA_WDI_TX_DB_RES;
		else
			end = IPA_WDI_CE_DB_RES;
			end = IPA_WDI_CE_DB_RES;
	} else {
	} else {
		start = IPA_WDI_RX_RING_RES;
		start = IPA_WDI_RX_RING_RES;
		if (ipa3_ctx->ipa_wdi2)
		if (ipa3_ctx->ipa_wdi2 ||
			ipa3_ctx->ipa_wdi3_over_gsi)
			end = IPA_WDI_RX_COMP_RING_WP_RES;
			end = IPA_WDI_RX_COMP_RING_WP_RES;
		else
		else
			end = IPA_WDI_RX_RING_RP_RES;
			end = IPA_WDI_RX_RING_RP_RES;
+158 −4
Original line number Original line Diff line number Diff line
@@ -74,6 +74,7 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
	const struct ipa_gsi_ep_config *gsi_ep_info;
	const struct ipa_gsi_ep_config *gsi_ep_info;
	int result, len;
	int result, len;
	unsigned long va;
	unsigned long va;
	uint32_t addr_low, addr_high;


	if (!info || !info_smmu || !ep) {
	if (!info || !info_smmu || !ep) {
		IPAERR("invalid input\n");
		IPAERR("invalid input\n");
@@ -211,23 +212,133 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
		IPAERR("failed to write evt ring scratch\n");
		IPAERR("failed to write evt ring scratch\n");
		goto fail_write_scratch;
		goto fail_write_scratch;
	}
	}
	/* write event ring db address */

	gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
	if (!is_smmu_enabled) {
		(u32)info->event_ring_doorbell_pa,
		IPADBG("smmu disabled\n");
		if (info->is_evt_rn_db_pcie_addr == true)
			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
		else
			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
		IPADBG_LOW("LSB 0x%x\n",
			(u32)info->event_ring_doorbell_pa);
		IPADBG_LOW("MSB 0x%x\n",
			(u32)((u64)info->event_ring_doorbell_pa >> 32));
			(u32)((u64)info->event_ring_doorbell_pa >> 32));
	} else {
		IPADBG("smmu enabled\n");
		if (info_smmu->is_evt_rn_db_pcie_addr == true)
			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
		else
			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
		IPADBG_LOW("LSB 0x%x\n",
			(u32)info_smmu->event_ring_doorbell_pa);
		IPADBG_LOW("MSB 0x%x\n",
			(u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
	}

	if (!is_smmu_enabled) {
		addr_low = (u32)info->event_ring_doorbell_pa;
		addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
	} else {
		if (dir == IPA_WDI3_TX_DIR) {
			if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
				true, info_smmu->event_ring_doorbell_pa,
				NULL, 4, true, &va)) {
				IPAERR("failed to get smmu mapping\n");
				result = -EFAULT;
				goto fail_write_scratch;
			}
		} else {
			if (ipa_create_gsi_smmu_mapping(
				IPA_WDI_RX_COMP_RING_WP_RES,
				true, info_smmu->event_ring_doorbell_pa,
				NULL, 4, true, &va)) {
				IPAERR("failed to get smmu mapping\n");
				result = -EFAULT;
				goto fail_write_scratch;
			}
		}
		addr_low = (u32)va;
		addr_high = (u32)((u64)va >> 32);
	}

	/*
	 * Arch specific:
	 * pcie addr which are not via smmu, use pa directly!
	 * pcie and DDR via 2 different port
	 * assert bit 40 to indicate it is pcie addr
	 * WDI-3.0, MSM --> pcie via smmu
	 * WDI-3.0, MDM --> pcie not via smmu + dual port
	 * assert bit 40 in case
	 */
	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
		is_smmu_enabled) {
		/*
		 * Irrespective of SMMU being enabled, don't use the IOVA
		 * address, since PCIe is not routed via SMMU on MDM targets
		 */
		if (info_smmu->is_evt_rn_db_pcie_addr == true) {
			addr_low = (u32)info_smmu->event_ring_doorbell_pa;
			addr_high =
				(u32)((u64)info_smmu->event_ring_doorbell_pa
				>> 32);
		}
	}

	/*
	 * GSI recommendation to set bit-40 for (mdm targets && pcie addr)
	 * from wdi-3.0 interface document
	 */
	if (!is_smmu_enabled) {
		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
			info->is_evt_rn_db_pcie_addr)
			addr_high |= (1 << 8);
	} else {
		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
			info_smmu->is_evt_rn_db_pcie_addr)
			addr_high |= (1 << 8);
	}

	gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
			addr_low,
			addr_high);


	/* write channel scratch */
	/* write channel scratch */
	memset(&ch_scratch, 0, sizeof(ch_scratch));
	memset(&ch_scratch, 0, sizeof(ch_scratch));
	ch_scratch.wdi3.update_rp_moderation_threshold =
	ch_scratch.wdi3.update_rp_moderation_threshold =
		UPDATE_RP_MODERATION_THRESHOLD;
		UPDATE_RP_MODERATION_THRESHOLD;
	if (dir == IPA_WDI3_RX_DIR) {
	if (dir == IPA_WDI3_RX_DIR) {
		if (!is_smmu_enabled)
			ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
			ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
		else
			ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
		/* this metadata reg offset need to be in words */
		/* this metadata reg offset need to be in words */
		ch_scratch.wdi3.endp_metadata_reg_offset =
		ch_scratch.wdi3.endp_metadata_reg_offset =
			ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
			ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
				gsi_ep_info->ipa_ep_num) / 4;
				gsi_ep_info->ipa_ep_num) / 4;
	}
	}


	if (!is_smmu_enabled) {
		IPADBG_LOW("smmu disabled\n");
		if (info->is_txr_rn_db_pcie_addr == true)
			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
		else
			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
		IPADBG_LOW("LSB 0x%x\n",
			(u32)info->transfer_ring_doorbell_pa);
		IPADBG_LOW("MSB 0x%x\n",
			(u32)((u64)info->transfer_ring_doorbell_pa >> 32));
	} else {
		IPADBG_LOW("smmu eabled\n");
		if (info_smmu->is_txr_rn_db_pcie_addr == true)
			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
		else
			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
		IPADBG_LOW("LSB 0x%x\n",
			(u32)info_smmu->transfer_ring_doorbell_pa);
		IPADBG_LOW("MSB 0x%x\n",
			(u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
	}

	if (!is_smmu_enabled) {
	if (!is_smmu_enabled) {
		ch_scratch.wdi3.wifi_rp_address_low =
		ch_scratch.wdi3.wifi_rp_address_low =
			(u32)info->transfer_ring_doorbell_pa;
			(u32)info->transfer_ring_doorbell_pa;
@@ -262,6 +373,49 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
				(u32)((u64)va >> 32);
				(u32)((u64)va >> 32);
		}
		}
	}
	}

	/*
	 * Arch specific:
	 * pcie addr which are not via smmu, use pa directly!
	 * pcie and DDR via 2 different port
	 * assert bit 40 to indicate it is pcie addr
	 * WDI-3.0, MSM --> pcie via smmu
	 * WDI-3.0, MDM --> pcie not via smmu + dual port
	 * assert bit 40 in case
	 */
	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
		is_smmu_enabled) {
		/*
		 * Irrespective of SMMU being enabled, don't use the IOVA
		 * address, since PCIe is not routed via SMMU on MDM targets
		 */
		if (info_smmu->is_txr_rn_db_pcie_addr == true) {
			ch_scratch.wdi3.wifi_rp_address_low =
				(u32)info_smmu->transfer_ring_doorbell_pa;
			ch_scratch.wdi3.wifi_rp_address_high =
				(u32)((u64)info_smmu->transfer_ring_doorbell_pa
				>> 32);
		}
	}

	/*
	 * GSI recommendation to set bit-40 for (mdm targets && pcie addr)
	 * from wdi-3.0 interface document
	 */
	if (!is_smmu_enabled) {
		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
			info->is_txr_rn_db_pcie_addr)
			ch_scratch.wdi3.wifi_rp_address_high =
			(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
			(1 << 8));
	} else {
		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
			info_smmu->is_txr_rn_db_pcie_addr)
			ch_scratch.wdi3.wifi_rp_address_high =
			(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
			(1 << 8));
	}

	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
	if (result != GSI_STATUS_SUCCESS) {
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("failed to write evt ring scratch\n");
		IPAERR("failed to write evt ring scratch\n");
+8 −0
Original line number Original line Diff line number Diff line
@@ -97,10 +97,12 @@ struct ipa_wdi_reg_intf_in_params {
 * @transfer_ring_size:  size of the transfer ring
 * @transfer_ring_size:  size of the transfer ring
 * @transfer_ring_doorbell_pa:  physical address of the doorbell that
 * @transfer_ring_doorbell_pa:  physical address of the doorbell that
	IPA uC will update the tailpointer of the transfer ring
	IPA uC will update the tailpointer of the transfer ring
 * @is_txr_rn_db_pcie_addr: Bool indicating txr ring DB is pcie or not
 * @event_ring_base_pa:  physical address of the base of the event ring
 * @event_ring_base_pa:  physical address of the base of the event ring
 * @event_ring_size:  event ring size
 * @event_ring_size:  event ring size
 * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
 * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
	will update the headpointer of the event ring
	will update the headpointer of the event ring
 * @is_evt_rn_db_pcie_addr: Bool indicating evt ring DB is pcie or not
 * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
 * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
	ring and the transfer ring has to be at least (num_pkt_buffers + 1)
	ring and the transfer ring has to be at least (num_pkt_buffers + 1)
 * @pkt_offset: packet offset (wdi header length)
 * @pkt_offset: packet offset (wdi header length)
@@ -113,10 +115,12 @@ struct ipa_wdi_pipe_setup_info {
	phys_addr_t  transfer_ring_base_pa;
	phys_addr_t  transfer_ring_base_pa;
	u32  transfer_ring_size;
	u32  transfer_ring_size;
	phys_addr_t  transfer_ring_doorbell_pa;
	phys_addr_t  transfer_ring_doorbell_pa;
	bool is_txr_rn_db_pcie_addr;


	phys_addr_t  event_ring_base_pa;
	phys_addr_t  event_ring_base_pa;
	u32  event_ring_size;
	u32  event_ring_size;
	phys_addr_t  event_ring_doorbell_pa;
	phys_addr_t  event_ring_doorbell_pa;
	bool is_evt_rn_db_pcie_addr;
	u16  num_pkt_buffers;
	u16  num_pkt_buffers;


	u16 pkt_offset;
	u16 pkt_offset;
@@ -132,10 +136,12 @@ struct ipa_wdi_pipe_setup_info {
 * @transfer_ring_size:  size of the transfer ring
 * @transfer_ring_size:  size of the transfer ring
 * @transfer_ring_doorbell_pa:  physical address of the doorbell that
 * @transfer_ring_doorbell_pa:  physical address of the doorbell that
	IPA uC will update the tailpointer of the transfer ring
	IPA uC will update the tailpointer of the transfer ring
 * @is_txr_rn_db_pcie_addr: Bool indicating txr ring DB is pcie or not
 * @event_ring_base_pa:  physical address of the base of the event ring
 * @event_ring_base_pa:  physical address of the base of the event ring
 * @event_ring_size:  event ring size
 * @event_ring_size:  event ring size
 * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
 * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
	will update the headpointer of the event ring
	will update the headpointer of the event ring
 * @is_evt_rn_db_pcie_addr: Bool indicating evt ring DB is pcie or not
 * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
 * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
	ring and the transfer ring has to be at least (num_pkt_buffers + 1)
	ring and the transfer ring has to be at least (num_pkt_buffers + 1)
 * @pkt_offset: packet offset (wdi header length)
 * @pkt_offset: packet offset (wdi header length)
@@ -148,10 +154,12 @@ struct ipa_wdi_pipe_setup_info_smmu {
	struct sg_table  transfer_ring_base;
	struct sg_table  transfer_ring_base;
	u32  transfer_ring_size;
	u32  transfer_ring_size;
	phys_addr_t  transfer_ring_doorbell_pa;
	phys_addr_t  transfer_ring_doorbell_pa;
	bool is_txr_rn_db_pcie_addr;


	struct sg_table  event_ring_base;
	struct sg_table  event_ring_base;
	u32  event_ring_size;
	u32  event_ring_size;
	phys_addr_t  event_ring_doorbell_pa;
	phys_addr_t  event_ring_doorbell_pa;
	bool is_evt_rn_db_pcie_addr;
	u16  num_pkt_buffers;
	u16  num_pkt_buffers;


	u16 pkt_offset;
	u16 pkt_offset;