Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a0140975, authored by Linux Build Service Account; committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: shared memory partitioning"

parents 37ee8610 bdc595e8
Loading
Loading
Loading
Loading
+301 −70
Original line number Diff line number Diff line
@@ -126,26 +126,6 @@ static struct msm_bus_scale_pdata ipa_bus_client_pdata = {

struct ipa_context *ipa_ctx;

static bool hdr_tbl_lcl = 1;
module_param(hdr_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
static bool ip4_rt_tbl_lcl;
module_param(ip4_rt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip4_rt_tbl_lcl,
		"where ip4 rt tables reside 1-local; 0-system");
static bool ip6_rt_tbl_lcl;
module_param(ip6_rt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip6_rt_tbl_lcl,
		"where ip6 rt tables reside 1-local; 0-system");
static bool ip4_flt_tbl_lcl = 1;
module_param(ip4_flt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip4_flt_tbl_lcl,
		"where ip4 flt tables reside 1-local; 0-system");
static bool ip6_flt_tbl_lcl = 1;
module_param(ip6_flt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip6_flt_tbl_lcl,
		"where ip6 flt tables reside 1-local; 0-system");

static int ipa_load_pipe_connection(struct platform_device *pdev,
				    enum a2_mux_pipe_direction pipe_dir,
				    struct a2_mux_pipe_connection     *pdata);
@@ -449,9 +429,6 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	case IPA_IOC_RESET_FLT:
		retval = ipa_reset_flt(arg);
		break;
	case IPA_IOC_DUMP:
		ipa_dump();
		break;
	case IPA_IOC_GET_RT_TBL:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_get_rt_tbl))) {
@@ -764,6 +741,269 @@ bail:
	return ret;
}

/*
 * ipa_init_sram() - seed the IPA shared-RAM partition boundaries with
 * canary words so partition overruns can be detected later.
 *
 * Maps only the unrestricted portion of IPA SRAM (past the first
 * smem_restricted_bytes) and writes IPA_CANARY_VAL at/just before the
 * start of each v2 RAM partition.
 *
 * Returns: 0 on success, -ENOMEM if the SRAM window cannot be mapped.
 */
static int ipa_init_sram(void)
{
	u32 *ipa_sram_mmio;
	unsigned long phys_addr;

	/* base of the unrestricted SRAM region; partition offsets below
	 * are relative to this point */
	phys_addr = ipa_ctx->ipa_wrapper_base + IPA_REG_BASE_OFST +
		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(
				ipa_ctx->smem_restricted_bytes / 4);
	ipa_sram_mmio = ioremap(phys_addr,
			ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
	if (!ipa_sram_mmio)
		return -ENOMEM;

/*
 * Store @val one word *before* byte offset @ofst (converted to a u32
 * index). Arguments are fully parenthesized so expressions such as
 * "OFST - 4" expand safely.
 */
#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(((ofst) - 4) / 4)] = (val))

	IPA_SRAM_SET(IPA_v2_RAM_V6_FLT_OFST - 4, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_V6_FLT_OFST, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_V4_RT_OFST - 4, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_V4_RT_OFST, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_V6_RT_OFST, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_MODEM_HDR_OFST, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_MODEM_OFST, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_APPS_V4_FLT_OFST, IPA_CANARY_VAL);
	IPA_SRAM_SET(IPA_v2_RAM_END_OFST, IPA_CANARY_VAL);

#undef IPA_SRAM_SET

	iounmap(ipa_sram_mmio);
	return 0;
}

static int ipa_init_hdr(void)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_hdr_init_local cmd;
	int rc = 0;

	mem.size = IPA_v2_RAM_MODEM_HDR_SIZE + IPA_v2_RAM_APPS_HDR_SIZE;
	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}
	memset(mem.base, 0, mem.size);

	cmd.hdr_table_src_addr = mem.phys_base;
	cmd.size_hdr_table = mem.size;
	cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
		IPA_v2_RAM_MODEM_HDR_OFST;

	desc.opcode = IPA_HDR_INIT_LOCAL;
	desc.pyld = &cmd;
	desc.len = sizeof(struct ipa_hdr_init_local);
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
	return rc;
}

static int ipa_init_rt4(void)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v4_routing_init v4_cmd;
	u32 *entry;
	int i;
	int rc = 0;

	for (i = IPA_v2_V4_MODEM_RT_INDEX_LO;
			i <= IPA_v2_V4_MODEM_RT_INDEX_HI; i++)
		ipa_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
	IPADBG("v4 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v4]);

	mem.size = IPA_v2_RAM_V4_RT_SIZE;
	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	entry = mem.base;
	for (i = 0; i < IPA_v2_RAM_V4_NUM_INDEX; i++) {
		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entry++;
	}

	desc.opcode = IPA_IP_V4_ROUTING_INIT;
	v4_cmd.ipv4_rules_addr = mem.phys_base;
	v4_cmd.size_ipv4_rules = mem.size;
	v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
		IPA_v2_RAM_V4_RT_OFST;
	IPADBG("putting Routing IPv4 rules to phys 0x%x",
				v4_cmd.ipv4_addr);

	desc.pyld = &v4_cmd;
	desc.len = sizeof(struct ipa_ip_v4_routing_init);
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
	return rc;
}

static int ipa_init_rt6(void)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v6_routing_init v6_cmd;
	u32 *entry;
	int i;
	int rc = 0;

	for (i = IPA_v2_V6_MODEM_RT_INDEX_LO;
			i <= IPA_v2_V6_MODEM_RT_INDEX_HI; i++)
		ipa_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
	IPADBG("v6 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v6]);

	mem.size = IPA_v2_RAM_V6_RT_SIZE;
	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	entry = mem.base;
	for (i = 0; i < IPA_v2_RAM_V6_NUM_INDEX; i++) {
		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entry++;
	}

	desc.opcode = IPA_IP_V6_ROUTING_INIT;
	v6_cmd.ipv6_rules_addr = mem.phys_base;
	v6_cmd.size_ipv6_rules = mem.size;
	v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
		IPA_v2_RAM_V6_RT_OFST;
	IPADBG("putting Routing IPv6 rules to phys 0x%x",
				v6_cmd.ipv6_addr);

	desc.pyld = &v6_cmd;
	desc.len = sizeof(struct ipa_ip_v6_routing_init);
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
	return rc;
}

static int ipa_init_flt4(void)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v4_filter_init v4_cmd;
	u32 *entry;
	int i;
	int rc = 0;

	mem.size = IPA_v2_RAM_V4_FLT_SIZE;
	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	entry = mem.base;

	*entry = ((0xFFFFF << 1) | 0x1);
	entry++;

	for (i = 0; i <= IPA_NUM_PIPES; i++) {
		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entry++;
	}

	desc.opcode = IPA_IP_V4_FILTER_INIT;
	v4_cmd.ipv4_rules_addr = mem.phys_base;
	v4_cmd.size_ipv4_rules = mem.size;
	v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
		IPA_v2_RAM_V4_FLT_OFST;
	IPADBG("putting Filtering IPv4 rules to phys 0x%x",
				v4_cmd.ipv4_addr);

	desc.pyld = &v4_cmd;
	desc.len = sizeof(struct ipa_ip_v4_filter_init);
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
	return rc;
}

static int ipa_init_flt6(void)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v6_filter_init v6_cmd;
	u32 *entry;
	int i;
	int rc = 0;

	mem.size = IPA_v2_RAM_V6_FLT_SIZE;
	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	entry = mem.base;

	*entry = (0xFFFFF << 1) | 0x1;
	entry++;

	for (i = 0; i <= IPA_NUM_PIPES; i++) {
		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entry++;
	}

	desc.opcode = IPA_IP_V6_FILTER_INIT;
	v6_cmd.ipv6_rules_addr = mem.phys_base;
	v6_cmd.size_ipv6_rules = mem.size;
	v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
		IPA_v2_RAM_V6_FLT_OFST;
	IPADBG("putting Filtering IPv6 rules to phys 0x%x",
				v6_cmd.ipv6_addr);

	desc.pyld = &v6_cmd;
	desc.len = sizeof(struct ipa_ip_v6_filter_init);
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
	return rc;
}

static int ipa_setup_apps_pipes(void)
{
	struct ipa_sys_connect_params sys_in;
@@ -782,6 +1022,15 @@ static int ipa_setup_apps_pipes(void)
	}
	IPADBG("Apps to IPA cmd pipe is connected\n");

	if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0) {
		ipa_init_sram();
		ipa_init_hdr();
		ipa_init_rt4();
		ipa_init_rt6();
		ipa_init_flt4();
		ipa_init_flt6();
	}

	if (ipa_setup_exception_path()) {
		IPAERR(":fail to setup excp path\n");
		result = -EPERM;
@@ -1575,15 +1824,6 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
		goto fail_mem_ctx;
	}

	IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
	       hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
	       ip6_flt_tbl_lcl);
	ipa_ctx->hdr_tbl_lcl = hdr_tbl_lcl;
	ipa_ctx->ip4_rt_tbl_lcl = ip4_rt_tbl_lcl;
	ipa_ctx->ip6_rt_tbl_lcl = ip6_rt_tbl_lcl;
	ipa_ctx->ip4_flt_tbl_lcl = ip4_flt_tbl_lcl;
	ipa_ctx->ip6_flt_tbl_lcl = ip6_flt_tbl_lcl;

	ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
	ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
	ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
@@ -1612,6 +1852,11 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
	}
	ipa_ctx->ctrl = ctrl;

	IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
	       ipa_ctx->hdr_tbl_lcl, ipa_ctx->ip4_rt_tbl_lcl,
	       ipa_ctx->ip6_rt_tbl_lcl, ipa_ctx->ip4_flt_tbl_lcl,
	       ipa_ctx->ip6_flt_tbl_lcl);

	/* get BUS handle */
	ipa_ctx->ipa_bus_hdl =
		msm_bus_scale_register_client(&ipa_bus_client_pdata);
@@ -1649,32 +1894,18 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
	IPADBG("IPA HW initialization sequence completed");

	ctrl->ipa_sram_read_settings();
	IPADBG("SRAM, base: 0x%x size: 0x%x, restricted bytes: 0x%x\n",
			ipa_ctx->ipa_wrapper_base +
			IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0),
	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
		ipa_ctx->smem_sz, ipa_ctx->smem_restricted_bytes);

	if (IPA_RAM_REQUIRED >
	if (ipa_ctx->smem_reqd_sz >
		ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes) {
		IPAERR("SW expect more core memory, needed %d, avail %d\n",
				IPA_RAM_END_OFST, ipa_ctx->smem_sz);
			ipa_ctx->smem_reqd_sz, ipa_ctx->smem_sz -
			ipa_ctx->smem_restricted_bytes);
		result = -ENOMEM;
		goto fail_init_hw;
	}

	ctrl->sram_flt_ipv4_ofst  = IPA_RAM_V4_FLT_OFST +
			ipa_ctx->smem_restricted_bytes;
	ctrl->sram_flt_ipv4_ofst  = IPA_RAM_V6_FLT_OFST +
			ipa_ctx->smem_restricted_bytes;
	ctrl->sram_nat_ipv4_ofst = IPA_RAM_NAT_OFST +
			ipa_ctx->smem_restricted_bytes;
	ctrl->sram_rt_ipv4_ofst   = IPA_RAM_V4_RT_OFST +
			ipa_ctx->smem_restricted_bytes;
	ctrl->sram_rt_ipv6_ofst   = IPA_RAM_V6_RT_OFST +
			ipa_ctx->smem_restricted_bytes;
	ctrl->sram_hdr_ofst       = IPA_RAM_HDR_OFST +
			ipa_ctx->smem_restricted_bytes;

	/* register IPA with SPS driver */
	bam_props.phys_addr = resource_p->bam_mem_base;
	bam_props.virt_size = resource_p->bam_mem_size;
@@ -1843,21 +2074,6 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
		IPADBG("IPA-A2 HW bridge initialized");
	}

	/* setup the A5-IPA pipes */
	if (ipa_setup_apps_pipes()) {
		IPAERR(":failed to setup IPA-Apps pipes.\n");
		result = -ENODEV;
		goto fail_apps_pipes;
	}
	IPADBG("IPA System2Bam pipes were connected\n");

	if (ipa_init_flt_block()) {
		IPAERR("fail to setup dummy filter rules\n");
		result = -ENODEV;
		goto fail_empty_rt_tbl;
	}
	IPADBG("filter block was set with dummy filter rules");

	/*
	 * setup an empty routing table in system memory, this will be used
	 * to delete a routing table cleanly and safely
@@ -1872,12 +2088,27 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
		IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
				ipa_ctx->empty_rt_tbl_mem.size);
		result = -ENOMEM;
		goto fail_empty_rt_tbl;
		goto fail_apps_pipes;
	}
	memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
			ipa_ctx->empty_rt_tbl_mem.size);
	IPADBG("empty routing table was allocated in system memory");

	/* setup the A5-IPA pipes */
	if (ipa_setup_apps_pipes()) {
		IPAERR(":failed to setup IPA-Apps pipes.\n");
		result = -ENODEV;
		goto fail_empty_rt_tbl;
	}
	IPADBG("IPA System2Bam pipes were connected\n");

	if (ipa_init_flt_block()) {
		IPAERR("fail to setup dummy filter rules\n");
		result = -ENODEV;
		goto fail_empty_rt_tbl;
	}
	IPADBG("filter block was set with dummy filter rules");

	/* setup the IPA pipe mem pool */
	if (resource_p->ipa_pipe_mem_size)
		ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
@@ -1963,12 +2194,12 @@ fail_device_create:
fail_alloc_chrdev_region:
	if (ipa_ctx->pipe_mem_pool)
		gen_pool_destroy(ipa_ctx->pipe_mem_pool);
fail_empty_rt_tbl:
	ipa_teardown_apps_pipes();
	dma_free_coherent(NULL,
			  ipa_ctx->empty_rt_tbl_mem.size,
			  ipa_ctx->empty_rt_tbl_mem.base,
			  ipa_ctx->empty_rt_tbl_mem.phys_base);
fail_empty_rt_tbl:
	ipa_teardown_apps_pipes();
fail_apps_pipes:
	/*
	 * DMA pool need to be released only for IPA HW v1.0 only.
+2 −0
Original line number Diff line number Diff line
@@ -471,6 +471,8 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			iovec->size = desc[i].opcode;
			iovec->flags |= SPS_IOVEC_FLAG_IMME;
			IPA_DUMP_BUFF(desc[i].pyld,
					tx_pkt->mem.phys_base, desc[i].len);
		} else {
			iovec->size = desc[i].len;
		}
+301 −53
Original line number Diff line number Diff line
@@ -168,56 +168,39 @@ static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
	return total_sz;
}

/**
 * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
 * @ip:	[in] the ip address family type
 * @mem:	[out] buffer to put the filtering table
 *
 * Returns:	0 on success, negative on failure
 */
int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
		u8 *hdr, u32 body_start_offset, u8 *hdr2, u32 *hdr_top)
{
	struct ipa_flt_tbl *tbl;
	struct ipa_flt_entry *entry;
	u32 hdr_top = 0;
	int i;
	u32 hdr_sz;
	u32 offset;
	u8 *hdr;
	u8 *body;
	u8 *base;
	struct ipa_mem_buffer flt_tbl_mem;
	u8 *ftbl_membody;

	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
	*hdr_top = 0;
	body = base;

	if (mem->size == 0) {
		IPAERR("flt tbl empty ip=%d\n", ip);
		goto error;
#define IPA_WRITE_FLT_HDR(idx, val) {			\
	if (idx <= 5) {					\
		*((u32 *)hdr + 1 + idx) = val;		\
	} else if (idx >= 6 && idx <= 9) {		\
		WARN_ON(1);				\
	} else if (idx >= 10 && idx <= 19) {		\
		*((u32 *)hdr2 + idx - 10) = val;	\
	} else {					\
		WARN_ON(1);				\
	}						\
}
	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
			GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		goto error;
	}

	memset(mem->base, 0, mem->size);

	/* build the flt tbl in the DMA buffer to submit to IPA HW */
	base = hdr = (u8 *)mem->base;
	body = base + hdr_sz;

	/* write a dummy header to move cursor */
	hdr = ipa_write_32(hdr_top, hdr);

	tbl = &ipa_ctx->glob_flt_tbl[ip];

	if (!list_empty(&tbl->head_flt_rule_list)) {
		hdr_top |= IPA_FLT_BIT_MASK;
		*hdr_top |= IPA_FLT_BIT_MASK;

		if (!tbl->in_sys) {
			offset = body - base;
			offset = body - base + body_start_offset;
			if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
				IPAERR("offset is not word multiple %d\n",
						offset);
@@ -227,6 +210,10 @@ int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
			offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
			/* rule is at an offset from base */
			offset |= IPA_FLT_BIT_MASK;

			if (hdr2)
				*(u32 *)hdr = offset;
			else
				hdr = ipa_write_32(offset, hdr);

			/* generate the rule-set */
@@ -264,6 +251,10 @@ int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
				IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
			ftbl_membody = flt_tbl_mem.base;
			memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);

			if (hdr2)
				*(u32 *)hdr = flt_tbl_mem.phys_base;
			else
				hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);

			/* generate the rule-set */
@@ -291,9 +282,10 @@ int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
		tbl = &ipa_ctx->flt_tbl[i][ip];
		if (!list_empty(&tbl->head_flt_rule_list)) {
			/* pipe "i" is at bit "i+1" */
			hdr_top |= (1 << (i + 1));
			*hdr_top |= (1 << (i + 1));

			if (!tbl->in_sys) {
				offset = body - base;
				offset = body - base + body_start_offset;
				if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
					IPAERR("ofst is not word multiple %d\n",
					       offset);
@@ -302,6 +294,10 @@ int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
				offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
				/* rule is at an offset from base */
				offset |= IPA_FLT_BIT_MASK;

				if (hdr2)
					IPA_WRITE_FLT_HDR(i, offset)
				else
					hdr = ipa_write_32(offset, hdr);

				/* generate the rule-set */
@@ -343,7 +339,13 @@ int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)

				ftbl_membody = flt_tbl_mem.base;
				memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
				hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);

				if (hdr2)
					IPA_WRITE_FLT_HDR(i,
						flt_tbl_mem.phys_base)
				else
					hdr = ipa_write_32(
						flt_tbl_mem.phys_base, hdr);

				/* generate the rule-set */
				list_for_each_entry(entry,
@@ -369,15 +371,68 @@ int ipa_generate_flt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
		}
	}

	return 0;

proc_err:
	return -EPERM;
}


/**
 * ipa_generate_flt_hw_tbl_v1() - generates the filtering hardware table
 * @ip:	[in] the ip address family type
 * @mem:	[out] buffer to put the filtering table
 *
 * Allocates a DMA buffer sized for the whole table (header + body) and
 * delegates rule generation to ipa_generate_flt_hw_tbl_common(). On the
 * v1 layout there is no secondary header area, so hdr2 is NULL.
 *
 * Returns:	0 on success, negative on failure
 */
static int ipa_generate_flt_hw_tbl_v1(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem)
{
	u32 hdr_top = 0;
	u32 hdr_sz;
	u8 *hdr;
	u8 *body;
	u8 *base;

	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);

	if (mem->size == 0) {
		IPAERR("flt tbl empty ip=%d\n", ip);
		goto error;
	}
	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
			GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		goto error;
	}

	memset(mem->base, 0, mem->size);

	/* build the flt tbl in the DMA buffer to submit to IPA HW */
	base = hdr = (u8 *)mem->base;
	body = base + hdr_sz;

	/* write a dummy header to move cursor */
	hdr = ipa_write_32(hdr_top, hdr);

	/* hdr2 == NULL: v1 layout has a single header area */
	if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, NULL,
				&hdr_top)) {
		IPAERR("fail to generate FLT HW table\n");
		goto proc_err;
	}

	/* now write the hdr_top */
	ipa_write_32(hdr_top, base);

	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);

	return 0;

proc_err:
	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
	/* don't leave a dangling pointer behind for the caller */
	mem->base = NULL;
error:

	return -EPERM;
}

@@ -428,7 +483,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
	}
}

static int __ipa_commit_flt(enum ipa_ip_type ip)
int __ipa_commit_flt_v1(enum ipa_ip_type ip)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
@@ -445,10 +500,12 @@ static int __ipa_commit_flt(enum ipa_ip_type ip)
	}

	if (ip == IPA_IP_v4) {
		avail = IPA_RAM_V4_FLT_SIZE;
		avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_v1_RAM_V4_FLT_SIZE :
			IPA_RAM_V4_FLT_SIZE_DDR;
		size = sizeof(struct ipa_ip_v4_filter_init);
	} else {
		avail = IPA_RAM_V6_FLT_SIZE;
		avail = ipa_ctx->ip6_flt_tbl_lcl ? IPA_v1_RAM_V6_FLT_SIZE :
			IPA_RAM_V6_FLT_SIZE_DDR;
		size = sizeof(struct ipa_ip_v6_filter_init);
	}
	cmd = kmalloc(size, GFP_KERNEL);
@@ -457,7 +514,7 @@ static int __ipa_commit_flt(enum ipa_ip_type ip)
		goto fail_alloc_cmd;
	}

	if (ipa_generate_flt_hw_tbl(ip, mem)) {
	if (ipa_generate_flt_hw_tbl_v1(ip, mem)) {
		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
		goto fail_hw_tbl_gen;
	}
@@ -472,13 +529,13 @@ static int __ipa_commit_flt(enum ipa_ip_type ip)
		desc.opcode = IPA_IP_V4_FILTER_INIT;
		v4->ipv4_rules_addr = mem->phys_base;
		v4->size_ipv4_rules = mem->size;
		v4->ipv4_addr = ipa_ctx->ctrl->sram_flt_ipv4_ofst;
		v4->ipv4_addr = IPA_v1_RAM_V4_FLT_OFST;
	} else {
		v6 = (struct ipa_ip_v6_filter_init *)cmd;
		desc.opcode = IPA_IP_V6_FILTER_INIT;
		v6->ipv6_rules_addr = mem->phys_base;
		v6->size_ipv6_rules = mem->size;
		v6->ipv6_addr = ipa_ctx->ctrl->sram_flt_ipv6_ofst;
		v6->ipv6_addr = IPA_v1_RAM_V6_FLT_OFST;
	}

	desc.pyld = cmd;
@@ -510,6 +567,197 @@ fail_alloc_mem:
	return -EPERM;
}

/*
 * ipa_generate_flt_hw_tbl_v2() - generate the v2 filtering HW table.
 * @ip:    the ip address family type
 * @mem:   [out] body buffer (rule payload); may end up with size 0
 * @head1: [out] first header block (7 words)
 * @head2: [out] second header block (10 words)
 *
 * Allocates the two header blocks, pre-points every header word at the
 * shared empty routing table, allocates the body buffer (if any rules
 * exist) and fills all three via ipa_generate_flt_hw_tbl_common().
 *
 * On success the caller owns all returned DMA buffers; when no body is
 * needed mem->size is 0 and mem->base is NULL.
 *
 * Returns: 0 on success, -EPERM on failure (all buffers freed).
 */
static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head1,
		struct ipa_mem_buffer *head2)
{
	int i;
	u32 hdr_sz;
	int num_words;
	u32 *entr;
	u32 body_start_offset;
	u32 hdr_top;

	/* body offsets are relative to the start of the FLT partition */
	if (ip == IPA_IP_v4)
		body_start_offset = IPA_v2_RAM_APPS_V4_FLT_OFST -
			IPA_v2_RAM_V4_FLT_OFST;
	else
		body_start_offset = IPA_v2_RAM_APPS_V6_FLT_OFST -
			IPA_v2_RAM_V6_FLT_OFST;

	num_words = 7;
	head1->size = num_words * 4;
	head1->base = dma_alloc_coherent(NULL, head1->size, &head1->phys_base,
			GFP_KERNEL);
	if (!head1->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head1->size);
		goto err;
	}
	/* default every header slot to the shared empty routing table */
	entr = (u32 *)head1->base;
	for (i = 0; i < num_words; i++) {
		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entr++;
	}

	num_words = 10;
	head2->size = num_words * 4;
	head2->base = dma_alloc_coherent(NULL, head2->size, &head2->phys_base,
			GFP_KERNEL);
	if (!head2->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head2->size);
		goto head_err;
	}
	entr = (u32 *)head2->base;
	for (i = 0; i < num_words; i++) {
		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entr++;
	}

	/* body excludes the header words accounted above */
	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
	mem->size -= hdr_sz;
	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);

	if (mem->size) {
		mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
			GFP_KERNEL);
		if (!mem->base) {
			IPAERR("fail to alloc DMA buff of size %d\n",
					mem->size);
			goto body_err;
		}
		memset(mem->base, 0, mem->size);
	} else {
		/* no body: make sure the caller never sees a stale pointer */
		mem->base = NULL;
	}

	if (ipa_generate_flt_hw_tbl_common(ip, mem->base, head1->base,
				body_start_offset, head2->base, &hdr_top)) {
		IPAERR("fail to generate FLT HW table\n");
		goto proc_err;
	}

	IPADBG("HEAD1\n");
	IPA_DUMP_BUFF(head1->base, head1->phys_base, head1->size);
	IPADBG("HEAD2\n");
	IPA_DUMP_BUFF(head2->base, head2->phys_base, head2->size);
	if (mem->size) {
		IPADBG("BODY\n");
		IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
	}

	return 0;

proc_err:
	if (mem->size)
		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
body_err:
	dma_free_coherent(NULL, head2->size, head2->base, head2->phys_base);
head_err:
	dma_free_coherent(NULL, head1->size, head1->base, head1->phys_base);
err:
	return -EPERM;
}

/*
 * __ipa_commit_flt_v2() - commit the filtering table to IPA v2 HW.
 * @ip: the ip address family type
 *
 * Generates the v2 filter table (two header blocks + optional body) and
 * copies it into IPA local SRAM via IPA_DMA_SHARED_MEM immediate
 * commands. The body is only copied when the table resides locally
 * (lcl); otherwise it stays in DDR and only the headers are written.
 *
 * Returns: 0 on success, -EFAULT on failure.
 */
int __ipa_commit_flt_v2(enum ipa_ip_type ip)
{
	struct ipa_desc desc[3];
	struct ipa_mem_buffer body;
	struct ipa_mem_buffer head1;
	struct ipa_mem_buffer head2;
	struct ipa_hw_imm_cmd_dma_shared_mem cmd1 = {0};
	struct ipa_hw_imm_cmd_dma_shared_mem cmd2 = {0};
	struct ipa_hw_imm_cmd_dma_shared_mem cmd3 = {0};
	u16 avail;
	int rc = 0;
	u32 local_addr1;
	u32 local_addr2;
	u32 local_addr3;
	bool lcl;

	memset(desc, 0, 3 * sizeof(struct ipa_desc));

	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_v2_RAM_APPS_V4_FLT_SIZE :
			IPA_RAM_V4_FLT_SIZE_DDR;
		/* header part 1 starts one word past the partition base */
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_V4_FLT_OFST + 4;
		/* header part 2 starts 12 words into the partition */
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_V4_FLT_OFST + 12 * 4;
		local_addr3 = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_APPS_V4_FLT_OFST;
		lcl = ipa_ctx->ip4_flt_tbl_lcl;
	} else {
		avail = ipa_ctx->ip6_flt_tbl_lcl ? IPA_v2_RAM_APPS_V6_FLT_SIZE :
			IPA_RAM_V6_FLT_SIZE_DDR;
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_V6_FLT_OFST + 4;
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_V6_FLT_OFST + 12 * 4;
		local_addr3 = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_APPS_V6_FLT_OFST;
		lcl = ipa_ctx->ip6_flt_tbl_lcl;
	}

	if (ipa_generate_flt_hw_tbl_v2(ip, &body, &head1, &head2)) {
		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
		rc = -EFAULT;
		goto fail_gen;
	}

	if (body.size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
		/* was silently returning 0 here - report the failure */
		rc = -EFAULT;
		goto fail_send_cmd;
	}

	cmd1.size = head1.size;
	cmd1.system_addr = head1.phys_base;
	cmd1.local_addr = local_addr1;

	desc[0].opcode = IPA_DMA_SHARED_MEM;
	desc[0].pyld = &cmd1;
	desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
	desc[0].type = IPA_IMM_CMD_DESC;

	cmd2.size = head2.size;
	cmd2.system_addr = head2.phys_base;
	cmd2.local_addr = local_addr2;

	desc[1].opcode = IPA_DMA_SHARED_MEM;
	desc[1].pyld = &cmd2;
	desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
	desc[1].type = IPA_IMM_CMD_DESC;

	if (lcl) {
		/* local table: also copy the body into SRAM */
		cmd3.size = body.size;
		cmd3.system_addr = body.phys_base;
		cmd3.local_addr = local_addr3;

		desc[2].opcode = IPA_DMA_SHARED_MEM;
		desc[2].pyld = &cmd3;
		desc[2].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
		desc[2].type = IPA_IMM_CMD_DESC;

		if (ipa_send_cmd(3, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd;
		}
	} else {
		if (ipa_send_cmd(2, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd;
		}
	}

	__ipa_reap_sys_flt_tbls(ip);
fail_send_cmd:
	if (body.size)
		dma_free_coherent(NULL, body.size, body.base, body.phys_base);
	dma_free_coherent(NULL, head1.size, head1.base, head1.phys_base);
	dma_free_coherent(NULL, head2.size, head2.base, head2.phys_base);
fail_gen:
	return rc;
}

static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
			      const struct ipa_flt_rule *rule, u8 add_rear,
			      u32 *rule_hdl)
@@ -695,7 +943,7 @@ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
	}

	if (rules->commit)
		if (__ipa_commit_flt(rules->ip)) {
		if (ipa_ctx->ctrl->ipa_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
@@ -736,7 +984,7 @@ int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
	}

	if (hdls->commit)
		if (__ipa_commit_flt(hdls->ip)) {
		if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) {
			mutex_unlock(&ipa_ctx->lock);
			result = -EPERM;
			goto bail;
@@ -769,7 +1017,7 @@ int ipa_commit_flt(enum ipa_ip_type ip)

	mutex_lock(&ipa_ctx->lock);

	if (__ipa_commit_flt(ip)) {
	if (ipa_ctx->ctrl->ipa_commit_flt(ip)) {
		result = -EPERM;
		goto bail;
	}
+83 −32

File changed.

Preview size limit exceeded, changes collapsed.

+13 −0
Original line number Diff line number Diff line
@@ -33,6 +33,7 @@
#define IPA_NAT_DMA           (14)
#define IPA_IP_PACKET_TAG     (15)
#define IPA_IP_PACKET_INIT    (16)
#define IPA_DMA_SHARED_MEM    (19)

#define IPA_INTERFACE_ID_EXCEPTION         (0)
#define IPA_INTERFACE_ID_A2_WWAN        (0x10)
@@ -354,4 +355,16 @@ enum ipa_hw_pkt_status_exception {
	IPA_HW_PKT_STATUS_EXCEPTION_MAX            = 0xFF
};

/*! @brief IPA_HW_IMM_CMD_DMA_SHARED_MEM Immediate Command Parameters */
/*
 * Wire-format payload for the IPA_DMA_SHARED_MEM (opcode 19) immediate
 * command. Bit-field order and widths define the exact layout consumed
 * by IPA HW - do not reorder, repack, or change field widths.
 */
struct ipa_hw_imm_cmd_dma_shared_mem {
	u32 reserved_1:16;
	u32 size:16;		/* transfer length in bytes */
	u32 system_addr:32;	/* DMA (system memory) address of the data */
	u32 local_addr:16;	/* byte offset into IPA local SRAM */
	u32 direction:1;	/* transfer direction; 0/1 semantics per HW
				 * spec - confirm before use */
	u32 skip_pipeline_clear:1;
	u32 reserved_2:14;
	u32 padding:32;		/* pads the command to its full HW size */
};

#endif /* _IPA_HW_DEFS_H */
Loading