Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3410c817 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: clean up DMA API calls"

parents a6a3d0f2 56937be2
Loading
Loading
Loading
Loading
+18 −16
Original line number Diff line number Diff line
@@ -825,7 +825,7 @@ static int ipa_init_sram(void)
	iounmap(ipa_sram_mmio);

	mem.size = IPA_STATUS_CLEAR_SIZE;
-	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
@@ -846,7 +846,7 @@ static int ipa_init_sram(void)
		rc = -EFAULT;
	}

-	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
	return rc;
}

@@ -858,7 +858,7 @@ static int ipa_init_hdr(void)
	int rc = 0;

	mem.size = IPA_v2_RAM_MODEM_HDR_SIZE + IPA_v2_RAM_APPS_HDR_SIZE;
-	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
@@ -882,7 +882,7 @@ static int ipa_init_hdr(void)
		rc = -EFAULT;
	}

-	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
	return rc;
}

@@ -901,7 +901,7 @@ static int ipa_init_rt4(void)
	IPADBG("v4 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v4]);

	mem.size = IPA_v2_RAM_V4_RT_SIZE;
-	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
@@ -932,7 +932,7 @@ static int ipa_init_rt4(void)
		rc = -EFAULT;
	}

-	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
	return rc;
}

@@ -951,7 +951,7 @@ static int ipa_init_rt6(void)
	IPADBG("v6 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v6]);

	mem.size = IPA_v2_RAM_V6_RT_SIZE;
-	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
@@ -982,7 +982,7 @@ static int ipa_init_rt6(void)
		rc = -EFAULT;
	}

-	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
	return rc;
}

@@ -996,7 +996,7 @@ static int ipa_init_flt4(void)
	int rc = 0;

	mem.size = IPA_v2_RAM_V4_FLT_SIZE;
-	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
@@ -1031,7 +1031,7 @@ static int ipa_init_flt4(void)
		rc = -EFAULT;
	}

-	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
	return rc;
}

@@ -1045,7 +1045,7 @@ static int ipa_init_flt6(void)
	int rc = 0;

	mem.size = IPA_v2_RAM_V6_FLT_SIZE;
-	mem.base = dma_alloc_coherent(NULL, mem.size, &mem.phys_base,
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
			GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
@@ -1080,7 +1080,7 @@ static int ipa_init_flt6(void)
		rc = -EFAULT;
	}

-	dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
	return rc;
}

@@ -1742,6 +1742,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
		goto fail_mem_ctx;
	}

+	ipa_ctx->pdev = ipa_dev;
	ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
	ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
	ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
@@ -1917,11 +1918,11 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
	 */
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
		ipa_ctx->dma_pool = dma_pool_create("ipa_1k",
-				NULL,
+				ipa_ctx->pdev,
				IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
				IPA_DMA_POOL_BOUNDARY);
	} else {
-		ipa_ctx->dma_pool = dma_pool_create("ipa_tx", NULL,
+		ipa_ctx->dma_pool = dma_pool_create("ipa_tx", ipa_ctx->pdev,
			IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
			0, 0);
	}
@@ -2003,7 +2004,8 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
	ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;

	ipa_ctx->empty_rt_tbl_mem.base =
-		dma_alloc_coherent(NULL, ipa_ctx->empty_rt_tbl_mem.size,
+		dma_alloc_coherent(ipa_ctx->pdev,
+				ipa_ctx->empty_rt_tbl_mem.size,
				    &ipa_ctx->empty_rt_tbl_mem.phys_base,
				    GFP_KERNEL);
	if (!ipa_ctx->empty_rt_tbl_mem.base) {
@@ -2127,7 +2129,7 @@ fail_alloc_chrdev_region:
		gen_pool_destroy(ipa_ctx->pipe_mem_pool);
fail_empty_rt_tbl:
	ipa_teardown_apps_pipes();
-	dma_free_coherent(NULL,
+	dma_free_coherent(ipa_ctx->pdev,
			  ipa_ctx->empty_rt_tbl_mem.size,
			  ipa_ctx->empty_rt_tbl_mem.base,
			  ipa_ctx->empty_rt_tbl_mem.phys_base);
+6 −6
Original line number Diff line number Diff line
@@ -144,7 +144,7 @@ static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
			IPAERR("FIFO pipe mem alloc fail ep %u\n",
				ipa_ep_idx);
			mem_buff_ptr->base =
-				dma_alloc_coherent(NULL,
+				dma_alloc_coherent(ipa_ctx->pdev,
				mem_buff_ptr->size,
				&dma_addr, GFP_KERNEL);
		} else {
@@ -158,7 +158,7 @@ static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
		}
	} else {
		mem_buff_ptr->base =
-			dma_alloc_coherent(NULL, mem_buff_ptr->size,
+			dma_alloc_coherent(ipa_ctx->pdev, mem_buff_ptr->size,
			&dma_addr, GFP_KERNEL);
	}
	mem_buff_ptr->phys_base = dma_addr;
@@ -335,7 +335,7 @@ int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,

sps_connect_fail:
	if (!ep->data_fifo_in_pipe_mem)
-		dma_free_coherent(NULL,
+		dma_free_coherent(ipa_ctx->pdev,
				  ep->connect.data.size,
				  ep->connect.data.base,
				  ep->connect.data.phys_base);
@@ -345,7 +345,7 @@ sps_connect_fail:

data_mem_alloc_fail:
	if (!ep->desc_fifo_in_pipe_mem)
-		dma_free_coherent(NULL,
+		dma_free_coherent(ipa_ctx->pdev,
				  ep->connect.desc.size,
				  ep->connect.desc.base,
				  ep->connect.desc.phys_base);
@@ -408,7 +408,7 @@ int ipa_disconnect(u32 clnt_hdl)
	if (!ep->desc_fifo_client_allocated &&
	     ep->connect.desc.base) {
		if (!ep->desc_fifo_in_pipe_mem)
-			dma_free_coherent(NULL,
+			dma_free_coherent(ipa_ctx->pdev,
					  ep->connect.desc.size,
					  ep->connect.desc.base,
					  ep->connect.desc.phys_base);
@@ -420,7 +420,7 @@ int ipa_disconnect(u32 clnt_hdl)
	if (!ep->data_fifo_client_allocated &&
	     ep->connect.data.base) {
		if (!ep->data_fifo_in_pipe_mem)
-			dma_free_coherent(NULL,
+			dma_free_coherent(ipa_ctx->pdev,
					  ep->connect.data.size,
					  ep->connect.data.base,
					  ep->connect.data.phys_base);
+15 −14
Original line number Diff line number Diff line
@@ -71,7 +71,7 @@ static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
						   link);
		list_del(&tx_pkt_expected->link);
		sys->len--;
-		dma_unmap_single(NULL, tx_pkt_expected->mem.phys_base,
+		dma_unmap_single(ipa_ctx->pdev, tx_pkt_expected->mem.phys_base,
				tx_pkt_expected->mem.size,
				DMA_TO_DEVICE);
		if (tx_pkt_expected->callback)
@@ -291,8 +291,8 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
			memcpy(tx_pkt->bounce, desc->pyld, desc->len);
		}
	} else {
-		dma_address = dma_map_single(NULL, desc->pyld, desc->len,
-				DMA_TO_DEVICE);
+		dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
+				desc->len, DMA_TO_DEVICE);
	}
	if (!dma_address) {
		IPAERR("failed to DMA wrap\n");
@@ -347,7 +347,8 @@ fail_sps_send:
		dma_pool_free(ipa_ctx->dma_pool, tx_pkt->bounce,
				dma_address);
	else
-		dma_unmap_single(NULL, dma_address, desc->len, DMA_TO_DEVICE);
+		dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len,
+				DMA_TO_DEVICE);
fail_dma_map:
	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
fail_mem_alloc:
@@ -456,7 +457,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
			}
		} else {
			tx_pkt->mem.phys_base =
-			   dma_map_single(NULL, tx_pkt->mem.base,
+			   dma_map_single(ipa_ctx->pdev, tx_pkt->mem.base,
					   tx_pkt->mem.size,
					   DMA_TO_DEVICE);
		}
@@ -517,7 +518,7 @@ failure:
					tx_pkt->bounce,
					tx_pkt->mem.phys_base);
		else
-			dma_unmap_single(NULL, tx_pkt->mem.phys_base,
+			dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);
		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
@@ -1015,8 +1016,8 @@ int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)

	ep->connect.options = ep->sys->sps_option;
	ep->connect.desc.size = sys_in->desc_fifo_sz;
-	ep->connect.desc.base = dma_alloc_coherent(NULL, ep->connect.desc.size,
-			&dma_addr, 0);
+	ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev,
+			ep->connect.desc.size, &dma_addr, 0);
	ep->connect.desc.phys_base = dma_addr;
	if (ep->connect.desc.base == NULL) {
		IPAERR("fail to get DMA desc memory.\n");
@@ -1065,7 +1066,7 @@ int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
fail_register_event:
	sps_disconnect(ep->ep_hdl);
fail_sps_connect:
-	dma_free_coherent(NULL, ep->connect.desc.size,
+	dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
			  ep->connect.desc.base,
			  ep->connect.desc.phys_base);
fail_sps_cfg:
@@ -1102,7 +1103,7 @@ int ipa_teardown_sys_pipe(u32 clnt_hdl)

	ipa_disable_data_path(clnt_hdl);
	sps_disconnect(ep->ep_hdl);
-	dma_free_coherent(NULL, ep->connect.desc.size,
+	dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
			  ep->connect.desc.base,
			  ep->connect.desc.phys_base);
	sps_free_endpoint(ep->ep_hdl);
@@ -1425,7 +1426,7 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
-		rx_pkt->data.dma_addr = dma_map_single(NULL, ptr,
+		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
						     sys->rx_buff_sz,
						     DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
@@ -1452,7 +1453,7 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
fail_sps_transfer:
	list_del(&rx_pkt->link);
	rx_len_cached = --sys->len;
-	dma_unmap_single(NULL, rx_pkt->data.dma_addr,
+	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
fail_dma_mapping:
	sys->free_skb(rx_pkt->data.skb);
@@ -1485,7 +1486,7 @@ static void ipa_cleanup_rx(struct ipa_sys_context *sys)
	list_for_each_entry_safe(rx_pkt, r,
				 &sys->head_desc_list, link) {
		list_del(&rx_pkt->link);
-		dma_unmap_single(NULL, rx_pkt->data.dma_addr,
+		dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
		sys->free_skb(rx_pkt->data.skb);
		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
@@ -1962,7 +1963,7 @@ static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
	if (size)
		rx_pkt_expected->len = size;
	rx_skb = rx_pkt_expected->data.skb;
-	dma_unmap_single(NULL, rx_pkt_expected->data.dma_addr,
+	dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr,
			sys->rx_buff_sz, DMA_FROM_DEVICE);
	rx_skb->tail = rx_skb->data + rx_pkt_expected->len;
	rx_skb->len = rx_pkt_expected->len;
+33 −24
Original line number Diff line number Diff line
@@ -376,7 +376,7 @@ static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
			/* allocate memory for the flt tbl */
			flt_tbl_mem.size = tbl->sz;
			flt_tbl_mem.base =
-			   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+			   dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size,
					   &flt_tbl_mem.phys_base, GFP_KERNEL);
			if (!flt_tbl_mem.base) {
				IPAERR("fail to alloc DMA buff of size %d\n",
@@ -462,7 +462,8 @@ static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
				/* allocate memory for the flt tbl */
				flt_tbl_mem.size = tbl->sz;
				flt_tbl_mem.base =
-				   dma_alloc_coherent(NULL, flt_tbl_mem.size,
+				   dma_alloc_coherent(ipa_ctx->pdev,
+						   flt_tbl_mem.size,
						   &flt_tbl_mem.phys_base,
						   GFP_KERNEL);
				if (!flt_tbl_mem.base) {
@@ -539,8 +540,8 @@ static int ipa_generate_flt_hw_tbl_v1(enum ipa_ip_type ip,
		IPAERR("flt tbl empty ip=%d\n", ip);
		goto error;
	}
-	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
-			GFP_KERNEL);
+	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		goto error;
@@ -569,7 +570,7 @@ static int ipa_generate_flt_hw_tbl_v1(enum ipa_ip_type ip,
	return 0;

proc_err:
-	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
error:
	return -EPERM;
}
@@ -582,15 +583,15 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
	tbl = &ipa_ctx->glob_flt_tbl[ip];
	if (tbl->prev_mem.phys_base) {
		IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
-		dma_free_coherent(NULL, tbl->prev_mem.size, tbl->prev_mem.base,
-				tbl->prev_mem.phys_base);
+		dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
+				tbl->prev_mem.base, tbl->prev_mem.phys_base);
		memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
	}

	if (list_empty(&tbl->head_flt_rule_list)) {
		if (tbl->curr_mem.phys_base) {
			IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
-			dma_free_coherent(NULL, tbl->curr_mem.size,
+			dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
					tbl->curr_mem.base,
					tbl->curr_mem.phys_base);
			memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
@@ -601,7 +602,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
		tbl = &ipa_ctx->flt_tbl[i][ip];
		if (tbl->prev_mem.phys_base) {
			IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
-			dma_free_coherent(NULL, tbl->prev_mem.size,
+			dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
					tbl->prev_mem.base,
					tbl->prev_mem.phys_base);
			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
@@ -611,7 +612,8 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
			if (tbl->curr_mem.phys_base) {
				IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
						i, ip);
-				dma_free_coherent(NULL, tbl->curr_mem.size,
+				dma_free_coherent(ipa_ctx->pdev,
+						tbl->curr_mem.size,
						tbl->curr_mem.base,
						tbl->curr_mem.phys_base);
				memset(&tbl->curr_mem, 0,
@@ -687,7 +689,7 @@ int __ipa_commit_flt_v1(enum ipa_ip_type ip)
	}

	__ipa_reap_sys_flt_tbls(ip);
-	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
	kfree(cmd);
	kfree(mem);

@@ -695,7 +697,8 @@ int __ipa_commit_flt_v1(enum ipa_ip_type ip)

fail_send_cmd:
	if (mem->phys_base)
-		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
fail_hw_tbl_gen:
	kfree(cmd);
fail_alloc_cmd:
@@ -725,8 +728,8 @@ static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,

	num_words = 7;
	head1->size = num_words * 4;
-	head1->base = dma_alloc_coherent(NULL, head1->size, &head1->phys_base,
-			GFP_KERNEL);
+	head1->base = dma_alloc_coherent(ipa_ctx->pdev, head1->size,
+			&head1->phys_base, GFP_KERNEL);
	if (!head1->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head1->size);
		goto err;
@@ -739,8 +742,8 @@ static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,

	num_words = 10;
	head2->size = num_words * 4;
-	head2->base = dma_alloc_coherent(NULL, head2->size, &head2->phys_base,
-			GFP_KERNEL);
+	head2->base = dma_alloc_coherent(ipa_ctx->pdev, head2->size,
+			&head2->phys_base, GFP_KERNEL);
	if (!head2->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head2->size);
		goto head_err;
@@ -756,8 +759,8 @@ static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,
	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);

	if (mem->size) {
-		mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
-			GFP_KERNEL);
+		mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+				&mem->phys_base, GFP_KERNEL);
		if (!mem->base) {
			IPAERR("fail to alloc DMA buff of size %d\n",
					mem->size);
@@ -785,11 +788,14 @@ static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,

proc_err:
	if (mem->size)
-		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
body_err:
-	dma_free_coherent(NULL, head2->size, head2->base, head2->phys_base);
+	dma_free_coherent(ipa_ctx->pdev, head2->size, head2->base,
+			head2->phys_base);
head_err:
-	dma_free_coherent(NULL, head1->size, head1->base, head1->phys_base);
+	dma_free_coherent(ipa_ctx->pdev, head1->size, head1->base,
+			head1->phys_base);
err:
	return -EPERM;
}
@@ -889,9 +895,12 @@ int __ipa_commit_flt_v2(enum ipa_ip_type ip)
	__ipa_reap_sys_flt_tbls(ip);
fail_send_cmd:
	if (body.size)
-		dma_free_coherent(NULL, body.size, body.base, body.phys_base);
-	dma_free_coherent(NULL, head1.size, head1.base, head1.phys_base);
-	dma_free_coherent(NULL, head2.size, head2.base, head2.phys_base);
+		dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
+				body.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, head1.size, head1.base,
+			head1.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, head2.size, head2.base,
+			head2.phys_base);
fail_gen:
	return rc;
}
+11 −8
Original line number Diff line number Diff line
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -32,8 +32,8 @@ static int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
	}
	IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);

-	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
-			GFP_KERNEL);
+	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
@@ -119,10 +119,11 @@ int __ipa_commit_hdr_v1(void)
	}

	if (ipa_ctx->hdr_tbl_lcl) {
-		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
	} else {
		if (ipa_ctx->hdr_mem.phys_base) {
-			dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
+			dma_free_coherent(ipa_ctx->pdev, ipa_ctx->hdr_mem.size,
					  ipa_ctx->hdr_mem.base,
					  ipa_ctx->hdr_mem.phys_base);
		}
@@ -135,7 +136,8 @@ int __ipa_commit_hdr_v1(void)

fail_send_cmd:
	if (mem->base)
-		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
fail_hw_tbl_gen:
	kfree(cmd);
fail_alloc_cmd:
@@ -195,10 +197,11 @@ int __ipa_commit_hdr_v2(void)
		rc = 0;

	if (ipa_ctx->hdr_tbl_lcl) {
-		dma_free_coherent(NULL, mem.size, mem.base, mem.phys_base);
+		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+				mem.phys_base);
	} else {
		if (!rc && ipa_ctx->hdr_mem.phys_base) {
-			dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
+			dma_free_coherent(ipa_ctx->pdev, ipa_ctx->hdr_mem.size,
					  ipa_ctx->hdr_mem.base,
					  ipa_ctx->hdr_mem.phys_base);
			ipa_ctx->hdr_mem = mem;
Loading