Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0965b6bf authored by Ashok Vuyyuru's avatar Ashok Vuyyuru
Browse files

msm: ipa4: Allocate coalescing close frame command payload



The coalescing close frame command payload was being allocated anew on
every suspend. Instead, allocate the command payload once in post init
and reuse it.

Change-Id: Iba83a7ff032707739d0a3b006ee6fa5245ac3857
Signed-off-by: default avatarAshok Vuyyuru <avuyyuru@codeaurora.org>
parent 24a21204
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -5848,6 +5848,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
		goto fail_dma_task;
	}

	result = ipa3_allocate_coal_close_frame();
	if (result) {
		IPAERR("failed to allocate coal frame cmd\n");
		goto fail_coal_frame;
	}

	if (ipa3_nat_ipv6ct_init_devices()) {
		IPAERR("unable to init NAT and IPv6CT devices\n");
		result = -ENODEV;
@@ -6054,6 +6060,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
fail_allok_pkt_init:
	ipa3_nat_ipv6ct_destroy_devices();
fail_nat_ipv6ct_init_dev:
	ipa3_free_coal_close_frame();
fail_coal_frame:
	ipa3_free_dma_task_for_gsi();
fail_dma_task:
fail_init_hw:
+3 −3
Original line number Diff line number Diff line
@@ -4970,8 +4970,8 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
	}
	cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT;
	/* call repl_hdlr before napi_reschedule / napi_complete */
	if (cnt)
	ep->sys->repl_hdlr(ep->sys);

	/* When not able to replenish enough descriptors pipe wait
	 * until minimum number descripotrs to replish.
	 */
@@ -4984,7 +4984,7 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
	} else {
		cnt = weight;
		IPADBG("Client = %d not replenished free descripotrs\n",
		IPADBG_LOW("Client = %d not replenished free descripotrs\n",
				ep->client);
	}
	return cnt;
+4 −0
Original line number Diff line number Diff line
@@ -1863,6 +1863,7 @@ struct ipa3_pc_mbox_data {
 * @icc_num_cases - number of icc scaling level supported
 * @icc_num_paths - number of paths icc would vote for bw
 * @icc_clk - table for icc bw clock value
 * @coal_cmd_pyld: holds the coalescing close frame command payload
 */
struct ipa3_context {
	struct ipa3_char_device_context cdev;
@@ -2039,6 +2040,7 @@ struct ipa3_context {
	u32 icc_num_cases;
	u32 icc_num_paths;
	u32 icc_clk[IPA_ICC_LVL_MAX][IPA_ICC_PATH_MAX][IPA_ICC_TYPE_MAX];
	struct ipahal_imm_cmd_pyld *coal_cmd_pyld;
};

struct ipa3_plat_drv_res {
@@ -3119,6 +3121,8 @@ void ipa3_disable_prefetch(enum ipa_client_type client);
int ipa3_alloc_common_event_ring(void);
int ipa3_allocate_dma_task_for_gsi(void);
void ipa3_free_dma_task_for_gsi(void);
int ipa3_allocate_coal_close_frame(void);
void ipa3_free_coal_close_frame(void);
int ipa3_set_clock_plan_from_pm(int idx);
void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys);
int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
+36 −23
Original line number Diff line number Diff line
@@ -8062,9 +8062,6 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)

void ipa3_force_close_coal(void)
{
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
	struct ipahal_reg_valmask valmask;
	struct ipa3_desc desc;
	int ep_idx;

@@ -8072,28 +8069,11 @@ void ipa3_force_close_coal(void)
	if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
		return;

	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&reg_write_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct register_write imm cmd\n");
		ipa_assert();
		return;
	}
	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
	ipa3_init_imm_cmd_desc(&desc, ipa3_ctx->coal_cmd_pyld);

	IPADBG("Sending 1 descriptor for coal force close\n");
	if (ipa3_send_cmd_timeout(1, &desc,
		IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC)) {
		IPAERR("ipa3_send_cmd failed\n");
		ipa_assert();
	}
	ipahal_destroy_imm_cmd(cmd_pyld);
	if (ipa3_send_cmd(1, &desc))
		IPADBG("ipa3_send_cmd timedout\n");
}

int ipa3_suspend_apps_pipes(bool suspend)
@@ -8222,6 +8202,39 @@ void ipa3_free_dma_task_for_gsi(void)
	memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info));
}

int ipa3_allocate_coal_close_frame(void)
{
	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
	struct ipahal_reg_valmask valmask;
	int ep_idx;

	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
	if (ep_idx == IPA_EP_NOT_ALLOCATED)
		return 0;
	IPADBG("Allocate coal close frame cmd\n");
	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	ipa3_ctx->coal_cmd_pyld =
		ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
			&reg_write_cmd, false);
	if (!ipa3_ctx->coal_cmd_pyld) {
		IPAERR("fail construct register_write imm cmd\n");
		ipa_assert();
		return 0;
	}

	return 0;
}

void ipa3_free_coal_close_frame(void)
{
	if (ipa3_ctx->coal_cmd_pyld)
		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld);
}
/**
 * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
 *