Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b97875e2 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: Fix to use GFP_DMA and add check for dma_map_single" into msm-4.9

parents 702aa97c e6510101
Loading
Loading
Loading
Loading
+17 −9
Original line number Diff line number Diff line
@@ -1584,6 +1584,7 @@ static int ipa_init_smem_region(int memory_region_size,
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
	struct ipa_desc desc;
	struct ipa_mem_buffer mem;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	int rc;

	if (memory_region_size == 0)
@@ -1603,7 +1604,7 @@ static int ipa_init_smem_region(int memory_region_size,
	memset(mem.base, 0, mem.size);

	cmd = kzalloc(sizeof(*cmd),
		GFP_KERNEL);
		flag);
	if (cmd == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		rc = -ENOMEM;
@@ -2166,6 +2167,7 @@ int _ipa_init_sram_v2(void)
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
	struct ipa_desc desc = {0};
	struct ipa_mem_buffer mem;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	int rc = 0;

	phys_addr = ipa_ctx->ipa_wrapper_base +
@@ -2203,7 +2205,7 @@ int _ipa_init_sram_v2(void)
	}
	memset(mem.base, 0, mem.size);

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	cmd = kzalloc(sizeof(*cmd), flag);
	if (cmd == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		rc = -ENOMEM;
@@ -2314,6 +2316,7 @@ int _ipa_init_hdr_v2(void)
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_hdr_init_local *cmd = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	int rc = 0;

	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
@@ -2325,7 +2328,7 @@ int _ipa_init_hdr_v2(void)
	}
	memset(mem.base, 0, mem.size);

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	cmd = kzalloc(sizeof(*cmd), flag);
	if (cmd == NULL) {
		IPAERR("Failed to alloc header init command object\n");
		rc = -ENOMEM;
@@ -2360,6 +2363,7 @@ int _ipa_init_hdr_v2_5(void)
	struct ipa_mem_buffer mem;
	struct ipa_hdr_init_local *cmd = NULL;
	struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
@@ -2370,7 +2374,7 @@ int _ipa_init_hdr_v2_5(void)
	}
	memset(mem.base, 0, mem.size);

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	cmd = kzalloc(sizeof(*cmd), flag);
	if (cmd == NULL) {
		IPAERR("Failed to alloc header init command object\n");
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
@@ -2411,7 +2415,7 @@ int _ipa_init_hdr_v2_5(void)
	memset(mem.base, 0, mem.size);
	memset(&desc, 0, sizeof(desc));

	dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL);
	dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
	if (dma_cmd == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		dma_free_coherent(ipa_ctx->pdev,
@@ -2462,6 +2466,7 @@ int _ipa_init_rt4_v2(void)
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v4_routing_init *v4_cmd = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u32 *entry;
	int i;
	int rc = 0;
@@ -2486,7 +2491,7 @@ int _ipa_init_rt4_v2(void)
		entry++;
	}

	v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
	v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
	if (v4_cmd == NULL) {
		IPAERR("Failed to alloc v4 routing init command object\n");
		rc = -ENOMEM;
@@ -2522,6 +2527,7 @@ int _ipa_init_rt6_v2(void)
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v6_routing_init *v6_cmd = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u32 *entry;
	int i;
	int rc = 0;
@@ -2546,7 +2552,7 @@ int _ipa_init_rt6_v2(void)
		entry++;
	}

	v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
	v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
	if (v6_cmd == NULL) {
		IPAERR("Failed to alloc v6 routing init command object\n");
		rc = -ENOMEM;
@@ -2582,6 +2588,7 @@ int _ipa_init_flt4_v2(void)
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v4_filter_init *v4_cmd = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u32 *entry;
	int i;
	int rc = 0;
@@ -2604,7 +2611,7 @@ int _ipa_init_flt4_v2(void)
		entry++;
	}

	v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
	v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
	if (v4_cmd == NULL) {
		IPAERR("Failed to alloc v4 fliter init command object\n");
		rc = -ENOMEM;
@@ -2640,6 +2647,7 @@ int _ipa_init_flt6_v2(void)
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipa_ip_v6_filter_init *v6_cmd = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u32 *entry;
	int i;
	int rc = 0;
@@ -2662,7 +2670,7 @@ int _ipa_init_flt6_v2(void)
		entry++;
	}

	v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
	v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
	if (v6_cmd == NULL) {
		IPAERR("Failed to alloc v6 fliter init command object\n");
		rc = -ENOMEM;
+21 −11
Original line number Diff line number Diff line
@@ -322,8 +322,8 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
		dma_address = desc->dma_address;
		tx_pkt->no_unmap_dma = true;
	}
	if (!dma_address) {
		IPAERR("failed to DMA wrap\n");
	if (dma_mapping_error(ipa_ctx->pdev, dma_address)) {
		IPAERR("dma_map_single failed\n");
		goto fail_dma_map;
	}

@@ -445,7 +445,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
		}
		dma_addr  = dma_map_single(ipa_ctx->pdev,
				transfer.iovec, size, DMA_TO_DEVICE);
		if (!dma_addr) {
		if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) {
			IPAERR("dma_map_single failed for sps xfr buff\n");
			kfree(transfer.iovec);
			return -EFAULT;
@@ -493,6 +493,15 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
					tx_pkt->mem.base,
					tx_pkt->mem.size,
					DMA_TO_DEVICE);

				if (dma_mapping_error(ipa_ctx->pdev,
					tx_pkt->mem.phys_base)) {
					IPAERR("dma_map_single ");
					IPAERR("failed\n");
					fail_dma_wrap = 1;
					goto failure;
				}

			} else {
				tx_pkt->mem.phys_base = desc[i].dma_address;
				tx_pkt->no_unmap_dma = true;
@@ -1873,8 +1882,8 @@ static void ipa_wq_repl_rx(struct work_struct *work)
		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
						     sys->rx_buff_sz,
						     DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
				rx_pkt->data.dma_addr == ~0) {
		if (dma_mapping_error(ipa_ctx->pdev,
				rx_pkt->data.dma_addr)) {
			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
			       __func__, (void *)rx_pkt->data.dma_addr,
			       ptr, sys);
@@ -2029,8 +2038,8 @@ static void ipa_alloc_wlan_rx_common_cache(u32 size)
		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
				rx_pkt->data.dma_addr == ~0) {
		if (dma_mapping_error(ipa_ctx->pdev,
				rx_pkt->data.dma_addr)) {
			IPAERR("dma_map_single failure %p for %p\n",
			       (void *)rx_pkt->data.dma_addr, ptr);
			goto fail_dma_mapping;
@@ -2101,8 +2110,8 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
						     sys->rx_buff_sz,
						     DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
				rx_pkt->data.dma_addr == ~0) {
		if (dma_mapping_error(ipa_ctx->pdev,
				rx_pkt->data.dma_addr)) {
			IPAERR("dma_map_single failure %p for %p\n",
			       (void *)rx_pkt->data.dma_addr, ptr);
			goto fail_dma_mapping;
@@ -2159,9 +2168,10 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
			ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
		if (rx_pkt->data.dma_addr == 0 ||
			rx_pkt->data.dma_addr == ~0)
		if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) {
			IPAERR("dma_map_single failure for rx_pkt\n");
			goto fail_dma_mapping;
		}

		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
		rx_len_cached = ++sys->len;
+15 −6
Original line number Diff line number Diff line
@@ -268,6 +268,7 @@ int __ipa_commit_hdr_v2(void)
	struct ipa_mem_buffer mem;
	struct ipa_hdr_init_system *cmd = NULL;
	struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
	gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	int rc = -EFAULT;

	if (ipa_generate_hdr_hw_tbl(&mem)) {
@@ -281,7 +282,7 @@ int __ipa_commit_hdr_v2(void)
				IPA_MEM_PART(apps_hdr_size));
			goto fail_send_cmd;
		} else {
			dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC);
			dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
			if (dma_cmd == NULL) {
				IPAERR("fail to alloc immediate cmd\n");
				rc = -ENOMEM;
@@ -303,7 +304,7 @@ int __ipa_commit_hdr_v2(void)
				IPA_MEM_PART(apps_hdr_size_ddr));
			goto fail_send_cmd;
		} else {
			cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
			cmd = kzalloc(sizeof(*cmd), flag);
			if (cmd == NULL) {
				IPAERR("fail to alloc hdr init cmd\n");
				rc = -ENOMEM;
@@ -359,6 +360,7 @@ int __ipa_commit_hdr_v2_5(void)
	struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL;
	struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL;
	struct ipa_register_write *reg_write_cmd = NULL;
	gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	int rc = -EFAULT;
	u32 proc_ctx_size;
	u32 proc_ctx_ofst;
@@ -383,7 +385,7 @@ int __ipa_commit_hdr_v2_5(void)
				IPA_MEM_PART(apps_hdr_size));
			goto fail_send_cmd1;
		} else {
			dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC);
			dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag);
			if (dma_cmd_hdr == NULL) {
				IPAERR("fail to alloc immediate cmd\n");
				rc = -ENOMEM;
@@ -406,7 +408,7 @@ int __ipa_commit_hdr_v2_5(void)
			goto fail_send_cmd1;
		} else {
			hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd),
				GFP_ATOMIC);
				flag);
			if (hdr_init_cmd == NULL) {
				IPAERR("fail to alloc immediate cmd\n");
				rc = -ENOMEM;
@@ -431,7 +433,7 @@ int __ipa_commit_hdr_v2_5(void)
			goto fail_send_cmd1;
		} else {
			dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx),
				GFP_ATOMIC);
				flag);
			if (dma_cmd_ctx == NULL) {
				IPAERR("fail to alloc immediate cmd\n");
				rc = -ENOMEM;
@@ -456,7 +458,7 @@ int __ipa_commit_hdr_v2_5(void)
			goto fail_send_cmd1;
		} else {
			reg_write_cmd = kzalloc(sizeof(*reg_write_cmd),
				GFP_ATOMIC);
				flag);
			if (reg_write_cmd == NULL) {
				IPAERR("fail to alloc immediate cmd\n");
				rc = -ENOMEM;
@@ -722,6 +724,11 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
				entry->hdr,
				entry->hdr_len,
				DMA_TO_DEVICE);
			if (dma_mapping_error(ipa_ctx->pdev,
				entry->phys_base)) {
				IPAERR("dma_map_single failure for entry\n");
				goto fail_dma_mapping;
			}
		}
	} else {
		entry->is_hdr_proc_ctx = false;
@@ -798,6 +805,8 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
	list_del(&entry->link);
	dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
			entry->hdr_len, DMA_TO_DEVICE);
fail_dma_mapping:
	entry->is_hdr_proc_ctx = false;
bad_hdr_len:
	entry->cookie = 0;
	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+3 −2
Original line number Diff line number Diff line
@@ -695,6 +695,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
	struct ipa_mem_buffer head;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u16 avail;
	u32 num_modem_rt_index;
	int rc = 0;
@@ -745,7 +746,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
	}

	cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
		GFP_KERNEL);
		flag);
	if (cmd1 == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		rc = -ENOMEM;
@@ -762,7 +763,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)

	if (lcl) {
		cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
			GFP_KERNEL);
			flag);
		if (cmd2 == NULL) {
			IPAERR("Failed to alloc immediate command object\n");
			rc = -ENOMEM;