Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0dd9d4fd authored by Gidon Studinski
Browse files

msm: ipa: remove support for IPA 1.0



Removes support for IPA 1.0, which is an old IPA version. That version
had HW bugs that required special workarounds in SW; those workarounds can
now be removed. Additionally, there is no commercial product which uses IPA 1.0.

Change-Id: I7bbd3d2c0b58aab84ec26638f87cbbc2ce781d17
Signed-off-by: Gidon Studinski <gidons@codeaurora.org>
parent 44c8947e
Loading
Loading
Loading
Loading
+7 −26
Original line number Diff line number Diff line
@@ -40,9 +40,6 @@
			       x == IPA_MODE_MOBILE_AP_WLAN)
#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
#define IPA_A5_MUX_HEADER_LENGTH (8)
#define IPA_DMA_POOL_SIZE (512)
#define IPA_DMA_POOL_ALIGNMENT (4)
#define IPA_DMA_POOL_BOUNDARY (1024)
#define IPA_ROUTING_RULE_BYTE_SIZE (4)
#define IPA_BAM_CNFG_BITS_VALv1_1 (0x7FFFE004)
#define IPA_BAM_CNFG_BITS_VALv2_0 (0xFFFFE004)
@@ -2319,7 +2316,7 @@ void _ipa_enable_clks_v2_0(void)
	}
}

void _ipa_enable_clks_v1(void)
void _ipa_enable_clks_v1_1(void)
{

	if (ipa_cnoc_clk) {
@@ -2408,7 +2405,7 @@ void ipa_enable_clks(void)
		WARN_ON(1);
}

void _ipa_disable_clks_v1(void)
void _ipa_disable_clks_v1_1(void)
{

	if (ipa_inactivity_clk)
@@ -2577,7 +2574,6 @@ static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
	if (!ipa_bam_mmio)
		return -ENOMEM;
	switch (ipa_ctx->ipa_hw_type) {
	case IPA_HW_v1_0:
	case IPA_HW_v1_1:
		reg_val = IPA_BAM_CNFG_BITS_VALv1_1;
		break;
@@ -3166,21 +3162,11 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
		result = -ENOMEM;
		goto fail_rx_pkt_wrapper_cache;
	}
	/*
	 * setup DMA pool 4 byte aligned, don't cross 1k boundaries, nominal
	 * size 512 bytes
	 * This is an issue with IPA HW v1.0 only.
	 */
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
		ipa_ctx->dma_pool = dma_pool_create("ipa_1k",
				ipa_ctx->pdev,
				IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
				IPA_DMA_POOL_BOUNDARY);
	} else {

	/* Setup DMA pool */
	ipa_ctx->dma_pool = dma_pool_create("ipa_tx", ipa_ctx->pdev,
		IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
		0, 0);
	}
	if (!ipa_ctx->dma_pool) {
		IPAERR("cannot alloc DMA pool.\n");
		result = -ENOMEM;
@@ -3416,11 +3402,6 @@ fail_empty_rt_tbl:
			  ipa_ctx->empty_rt_tbl_mem.phys_base);
fail_apps_pipes:
	idr_destroy(&ipa_ctx->ipa_idr);
	/*
	 * DMA pool need to be released only for IPA HW v1.0 only.
	 */
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
		dma_pool_destroy(ipa_ctx->dma_pool);
fail_dma_pool:
	kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
fail_rx_pkt_wrapper_cache:
+6 −45
Original line number Diff line number Diff line
@@ -100,25 +100,6 @@ static struct dentry *dfile_rm_stats;
static char dbg_buff[IPA_MAX_MSG_LEN];
static s8 ep_reg_idx;

/*
 * _ipa_read_gen_reg_v1_0() - format the IPA v1.0 general-register dump
 * for debugfs (removed by this commit together with all IPA 1.0 support).
 *
 * Reads the version, component HW version, route, filter, shared-memory
 * size and head-of-line-blocking registers via ipa_read_reg() and renders
 * them as one "NAME=0xVALUE" line each.
 *
 * Return: number of characters written (the scnprintf() return value).
 *
 * NOTE(review): @buff and @max_len are ignored — output goes into the
 * file-scope dbg_buff bounded by IPA_MAX_MSG_LEN. The signature presumably
 * exists to match a per-HW-version callback table; confirm against the
 * caller.
 */
int _ipa_read_gen_reg_v1_0(char *buff, int max_len)
{
	/* Single formatted write into the shared debugfs buffer. */
	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
			"IPA_VERSION=0x%x\n"
			"IPA_COMP_HW_VERSION=0x%x\n"
			"IPA_ROUTE=0x%x\n"
			"IPA_FILTER=0x%x\n"
			"IPA_SHARED_MEM_SIZE=0x%x\n"
			"IPA_HEAD_OF_LINE_BLOCK_EN=0x%x\n",
			ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
			ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
			ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_0),
			ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_0),
			ipa_read_reg(ipa_ctx->mmio,
				IPA_SHARED_MEM_SIZE_OFST_v1_0),
			ipa_read_reg(ipa_ctx->mmio,
				IPA_HEAD_OF_LINE_BLOCK_EN_OFST));
}

int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
{
	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -244,26 +225,6 @@ static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
	return count;
}

/*
 * _ipa_read_ep_reg_v1_0() - format the IPA v1.0 endpoint-register dump
 * for one pipe (removed by this commit together with all IPA 1.0 support).
 * @pipe: endpoint index used to select the per-pipe register offsets.
 *
 * Reads the NAT, HDR, MODE, AGGR and ROUTE endpoint-init registers for
 * @pipe via ipa_read_reg() and renders them as "NAME_<pipe>=0xVALUE"
 * lines.
 *
 * Return: number of characters written (the scnprintf() return value).
 *
 * NOTE(review): @buf and @max_len are ignored — output goes into the
 * file-scope dbg_buff bounded by IPA_MAX_MSG_LEN, matching the pattern of
 * the other per-HW-version debugfs callbacks in this file.
 */
int _ipa_read_ep_reg_v1_0(char *buf, int max_len, int pipe)
{
	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
			"IPA_ENDP_INIT_NAT_%u=0x%x\n"
			"IPA_ENDP_INIT_HDR_%u=0x%x\n"
			"IPA_ENDP_INIT_MODE_%u=0x%x\n"
			"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
			"IPA_ENDP_INIT_ROUTE_%u=0x%x\n",
			pipe, ipa_read_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_NAT_N_OFST_v1_0(pipe)),
				pipe, ipa_read_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_HDR_N_OFST_v1_0(pipe)),
				pipe, ipa_read_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_MODE_N_OFST_v1_0(pipe)),
				pipe, ipa_read_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_AGGR_N_OFST_v1_0(pipe)),
				pipe, ipa_read_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_ROUTE_N_OFST_v1_0(pipe)));
}

int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe)
{
	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -1194,13 +1155,13 @@ static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf,
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}

/*
 * _ipa_write_dbg_cnt_v1_1() - arm or disarm HW debug counter 0.
 * @option: 1 enables the counter (IPA_DBG_CNTR_ON); any other value
 *          disables it (IPA_DBG_CNTR_OFF).
 *
 * NOTE(review): this span is diff residue with the +/- markers stripped —
 * the pre-rename lines (_ipa_write_dbg_cnt_v1 /
 * IPA_DEBUG_CNT_CTRL_N_OFST_v1) and the post-rename lines
 * (_ipa_write_dbg_cnt_v1_1 / IPA_DEBUG_CNT_CTRL_N_OFST_v1_1) are
 * interleaved below, so the text as shown is not valid C. Consult the
 * underlying commit for the real before/after versions.
 */
void _ipa_write_dbg_cnt_v1(int option)
void _ipa_write_dbg_cnt_v1_1(int option)
{
	if (option == 1)
		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1(0),
		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0),
				IPA_DBG_CNTR_ON);
	else
		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1(0),
		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0),
				IPA_DBG_CNTR_OFF);
}

@@ -1238,12 +1199,12 @@ static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf,
	return count;
}

int _ipa_read_dbg_cnt_v1(char *buf, int max_len)
int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len)
{
	int regval;

	regval = ipa_read_reg(ipa_ctx->mmio,
			IPA_DEBUG_CNT_REG_N_OFST_v1(0));
			IPA_DEBUG_CNT_REG_N_OFST_v1_1(0));

	return scnprintf(buf, max_len,
			"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+12 −72
Original line number Diff line number Diff line
@@ -264,9 +264,6 @@ static void ipa_wq_handle_tx(struct work_struct *work)
 * @in_atomic:  whether caller is in atomic context
 *
 * - Allocate tx_packet wrapper
 * - Allocate a bounce buffer due to HW constrains
 *   (This buffer will be used for the DMA command)
 * - Copy the data (desc->pyld) to the bounce buffer
 * - transfer data to the IPA
 * - after the transfer was done the SPS will
 *   notify the sending user via ipa_sps_irq_comp_tx()
@@ -293,29 +290,8 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
	}

	if (!desc->dma_address_valid) {
		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
			WARN_ON(desc->len > 512);

			/*
			 * Due to a HW limitation, we need to make sure that
			 * the packet does not cross a 1KB boundary
			 */
			tx_pkt->bounce = dma_pool_alloc(
				ipa_ctx->dma_pool,
				mem_flag, &dma_address);
			if (!tx_pkt->bounce) {
				dma_address = 0;
			} else {
				WARN_ON(!ipa_straddle_boundary
					((u32)dma_address,
					(u32)dma_address + desc->len - 1,
					1024));
				memcpy(tx_pkt->bounce, desc->pyld, desc->len);
			}
		} else {
		dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
			desc->len, DMA_TO_DEVICE);
		}
	} else {
		dma_address = desc->dma_address;
		tx_pkt->no_unmap_dma = true;
@@ -369,12 +345,7 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
fail_sps_send:
	list_del(&tx_pkt->link);
	spin_unlock_bh(&sys->spinlock);
	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
		dma_pool_free(ipa_ctx->dma_pool, tx_pkt->bounce,
				dma_address);
	else
		dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len,
				DMA_TO_DEVICE);
	dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
fail_dma_map:
	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
fail_mem_alloc:
@@ -391,8 +362,6 @@ fail_mem_alloc:
 * This function is used for system-to-bam connection.
 * - SPS driver expect struct sps_transfer which will contain all the data
 *   for a transaction
 * - The sps_transfer struct will be pointing to bounce buffers for
 *   its DMA command (immediate command and data)
 * - ipa_tx_pkt_wrapper will be used for each ipa
 *   descriptor (allocated from wrappers cache)
 * - The wrapper struct will be configured for each ipa-desc payload and will
@@ -479,35 +448,11 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
		tx_pkt->mem.size = desc[i].len;

		if (!desc->dma_address_valid) {
			if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
				WARN_ON(tx_pkt->mem.size > 512);

				/*
				 * Due to a HW limitation, we need to make sure
				 * that the packet does not cross a
				 * 1KB boundary
				 */
				tx_pkt->bounce =
				dma_pool_alloc(ipa_ctx->dma_pool,
					       mem_flag,
					       &tx_pkt->mem.phys_base);
				if (!tx_pkt->bounce) {
					tx_pkt->mem.phys_base = 0;
				} else {
					WARN_ON(!ipa_straddle_boundary(
						(u32)tx_pkt->mem.phys_base,
						(u32)tx_pkt->mem.phys_base +
						tx_pkt->mem.size - 1, 1024));
					memcpy(tx_pkt->bounce, tx_pkt->mem.base,
						tx_pkt->mem.size);
				}
			} else {
			tx_pkt->mem.phys_base =
				dma_map_single(ipa_ctx->pdev,
				tx_pkt->mem.base,
				tx_pkt->mem.size,
				DMA_TO_DEVICE);
			}
		} else {
			tx_pkt->mem.phys_base = desc->dma_address;
			tx_pkt->no_unmap_dma = true;
@@ -525,7 +470,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
		tx_pkt->user2 = desc[i].user2;

		/*
		 * Point the iovec to the bounce buffer and
		 * Point the iovec to the buffer and
		 * add this packet to system pipe context.
		 */
		iovec->addr = tx_pkt->mem.phys_base;
@@ -565,11 +510,6 @@ failure:
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
			dma_pool_free(ipa_ctx->dma_pool,
					tx_pkt->bounce,
					tx_pkt->mem.phys_base);
		else
		dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
				tx_pkt->mem.size,
				DMA_TO_DEVICE);
+3 −3
Original line number Diff line number Diff line
@@ -526,7 +526,7 @@ proc_err:
 *
 * Returns:	0 on success, negative on failure
 */
static int ipa_generate_flt_hw_tbl_v1(enum ipa_ip_type ip,
static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem)
{
	u32 hdr_top = 0;
@@ -625,7 +625,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
	}
}

int __ipa_commit_flt_v1(enum ipa_ip_type ip)
int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
@@ -656,7 +656,7 @@ int __ipa_commit_flt_v1(enum ipa_ip_type ip)
		goto fail_alloc_cmd;
	}

	if (ipa_generate_flt_hw_tbl_v1(ip, mem)) {
	if (ipa_generate_flt_hw_tbl_v1_1(ip, mem)) {
		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
		goto fail_hw_tbl_gen;
	}
+1 −1
Original line number Diff line number Diff line
@@ -170,7 +170,7 @@ static int ipa_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
 * __ipa_commit_hdr() commits hdr to hardware
 * This function needs to be called with a locked mutex.
 */
int __ipa_commit_hdr_v1(void)
int __ipa_commit_hdr_v1_1(void)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
Loading