Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2d22f881 authored by Sivan Reinstein
Browse files

msm: ipa: add support for modem SSR



As part of modem Sub System Restart, ipa driver needs to perform
cleanup before the modem goes down and reinitialization after the
modem goes back up.

CRs-Fixed: 675048
Change-Id: I39b156f2060b47c3bdbe9236a5129055b2d854dd
Signed-off-by: Sivan Reinstein <sivanr@codeaurora.org>
parent d7ed6c7f
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -5,6 +5,9 @@ This module enables embedded data calls using IPA HW.
Required properties:
- compatible:		Must be "qcom,rmnet-ipa"

Optional:
- qcom,rmnet-ipa-ssr: determine if modem SSR is supported

Example:
	qcom,rmnet-ipa {
		compatible = "qcom,rmnet-ipa";
+428 −0
Original line number Diff line number Diff line
@@ -51,9 +51,20 @@

#define IPA_AGGR_MAX_STR_LENGTH (10)

#define CLEANUP_TAG_PROCESS_TIMEOUT 20

#define IPA_AGGR_STR_IN_BYTES(str) \
	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)

#define IPA_Q6_CLEANUP_FLT_RT_MAX_CMDS \
	(IPA_NUM_PIPES*2 + \
	(IPA_v2_V4_MODEM_RT_INDEX_HI - IPA_v2_V4_MODEM_RT_INDEX_LO + 1) + \
	(IPA_v2_V6_MODEM_RT_INDEX_HI - IPA_v2_V6_MODEM_RT_INDEX_LO + 1)) \

/* To be on the safe side */
#define IPA_Q6_CLEANUP_EXP_AGGR_MAX_CMDS \
	(IPA_NUM_PIPES*2) \

#ifdef CONFIG_COMPAT
#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ADD_HDR, \
@@ -980,6 +991,423 @@ bail:
	return ret;
}

static int ipa_init_smem_region(int memory_region_size,
				int memory_region_offset)
{
	struct ipa_hw_imm_cmd_dma_shared_mem cmd;
	struct ipa_desc desc;
	struct ipa_mem_buffer mem;
	int rc;

	memset(&desc, 0, sizeof(desc));
	memset(&cmd, 0, sizeof(cmd));
	memset(&mem, 0, sizeof(mem));

	mem.size = memory_region_size;
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	memset(mem.base, 0, mem.size);
	cmd.size = mem.size;
	cmd.system_addr = mem.phys_base;
	cmd.local_addr = ipa_ctx->smem_restricted_bytes +
		memory_region_offset;
	desc.opcode = IPA_DMA_SHARED_MEM;
	desc.pyld = &cmd;
	desc.len = sizeof(cmd);
	desc.type = IPA_IMM_CMD_DESC;

	rc = ipa_send_cmd(1, &desc);
	if (rc) {
		IPAERR("failed to send immediate command (error %d)\n", rc);
		rc = -EFAULT;
	}

	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
		mem.phys_base);

	return rc;
}

/**
* ipa_init_q6_smem() - Initialize Q6 general memory and
*                      header memory regions in IPA.
*
* Clears the modem RAM region first, then the modem header region,
* stopping at the first failure.
*
* Return codes:
* 0: success
* -ENOMEM: failed to allocate dma memory
* -EFAULT: failed to send IPA command to initialize the memory
*/
int ipa_init_q6_smem(void)
{
	int rc = ipa_init_smem_region(IPA_v2_RAM_MODEM_SIZE,
				      IPA_v2_RAM_MODEM_OFST);
	if (rc) {
		IPAERR("failed to initialize Modem RAM memory\n");
		return rc;
	}

	rc = ipa_init_smem_region(IPA_v2_RAM_MODEM_HDR_SIZE,
				  IPA_v2_RAM_MODEM_HDR_OFST);
	if (rc)
		IPAERR("failed to initialize Modem HDRs RAM memory\n");

	return rc;
}

/*
 * Descriptor completion callback: frees the kzalloc'd payload passed
 * in user1 (user2 is unused, required by the callback signature).
 */
static void ipa_free_buffer(void *user1, int user2)
{
	kfree(user1);
}

/*
 * Put every Q6 producer pipe into delay mode by setting the
 * ENDP_DELAY bit in its INIT_CTRL register. Pipes without an
 * endpoint mapping are skipped.
 *
 * Always returns 0.
 */
static int ipa_q6_pipe_delay(void)
{
	int client_idx;
	int ep_idx;
	u32 reg_val;

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (!IPA_CLIENT_IS_Q6_PROD(client_idx))
			continue;

		ep_idx = ipa_get_ep_mapping(client_idx);
		if (ep_idx == -1)
			continue;

		reg_val = 0;
		IPA_SETFIELD_IN_REG(reg_val, 1,
			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);

		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val);
	}

	return 0;
}

/*
 * Configure every Q6 consumer pipe to avoid head-of-line blocking
 * during SSR: zero the HOLB timer, enable immediate HOLB drop, and
 * suspend the endpoint. Register writes go directly to MMIO because
 * these pipes are not valid from the APPS perspective (see comment
 * below). Always returns 0.
 */
static int ipa_q6_avoid_holb(void)
{
	u32 reg_val;
	int ep_idx;
	int client_idx;
	struct ipa_ep_cfg_ctrl avoid_holb;

	memset(&avoid_holb, 0, sizeof(avoid_holb));
	avoid_holb.ipa_ep_suspend = true;

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
			ep_idx = ipa_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			/*
			 * ipa_cfg_ep_holb is not used here because we are
			 * setting HOLB on Q6 pipes, and from APPS perspective
			 * they are not valid, therefore, the above function
			 * will fail.
			 */
			/* timer = 0: drop packets immediately on HOLB */
			reg_val = 0;
			IPA_SETFIELD_IN_REG(reg_val, 0,
				IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT,
				IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK);

			ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(ep_idx),
				reg_val);

			/* enable HOL-blocking drop on this endpoint */
			reg_val = 0;
			IPA_SETFIELD_IN_REG(reg_val, 1,
				IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT,
				IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK);

			ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(ep_idx),
				reg_val);

			/* suspend the endpoint; return value intentionally
			 * ignored (pipe may be invalid from APPS side) */
			ipa_cfg_ep_ctrl(ep_idx, &avoid_holb);
		}
	}

	return 0;
}

static int ipa_q6_clean_q6_tables(void)
{
	struct ipa_desc *desc;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
	int client_idx;
	int ep_idx;
	int num_cmds = 0;
	int index;
	int retval;
	struct ipa_mem_buffer mem = { 0 };
	u32 *entry;

	mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	mem.size = 4;
	entry = mem.base;
	*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;

	desc = kzalloc(sizeof(struct ipa_desc) *
		IPA_Q6_CLEANUP_FLT_RT_MAX_CMDS, GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		retval = -ENOMEM;
		goto bail_dma;
	}

	cmd = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem) *
		IPA_Q6_CLEANUP_FLT_RT_MAX_CMDS, GFP_KERNEL);
	if (!cmd) {
		IPAERR("failed to allocate memory\n");
		retval = -ENOMEM;
		goto bail_desc;
	}

	/*
	 * Iterating over all the pipes which are either invalid but connected
	 * or connected but not configured by AP.
	 */
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		ep_idx = ipa_get_ep_mapping(client_idx);
		if (ep_idx == -1)
			continue;

		if (!ipa_ctx->ep[ep_idx].valid ||
		    ipa_ctx->ep[ep_idx].skip_ep_cfg) {
			/*
			 * Need to point v4 and v6 fltr tables to an empty
			 * table
			 */
			cmd[num_cmds].size = mem.size;
			cmd[num_cmds].system_addr = mem.phys_base;
			cmd[num_cmds].local_addr =
				ipa_ctx->smem_restricted_bytes +
				IPA_v2_RAM_V4_FLT_OFST + 8 + ep_idx*4;

			desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
			desc[num_cmds].pyld = &cmd[num_cmds];
			desc[num_cmds].len = sizeof(*cmd);
			desc[num_cmds].type = IPA_IMM_CMD_DESC;
			num_cmds++;

			cmd[num_cmds].size = mem.size;
			cmd[num_cmds].system_addr =  mem.phys_base;
			cmd[num_cmds].local_addr =
				ipa_ctx->smem_restricted_bytes +
				IPA_v2_RAM_V6_FLT_OFST + 8 + ep_idx*4;

			desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
			desc[num_cmds].pyld = &cmd[num_cmds];
			desc[num_cmds].len = sizeof(*cmd);
			desc[num_cmds].type = IPA_IMM_CMD_DESC;
			num_cmds++;
		}
	}

	/* Need to point v4/v6 modem routing tables to an empty table */
	for (index = IPA_v2_V4_MODEM_RT_INDEX_LO; index <=
		IPA_v2_V4_MODEM_RT_INDEX_HI; index++) {
		cmd[num_cmds].size = mem.size;
		cmd[num_cmds].system_addr =  mem.phys_base;
		cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_V4_RT_OFST + index*4;

		desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
		desc[num_cmds].pyld = &cmd[num_cmds];
		desc[num_cmds].len = sizeof(*cmd);
		desc[num_cmds].type = IPA_IMM_CMD_DESC;
		num_cmds++;
	}

	for (index = IPA_v2_V6_MODEM_RT_INDEX_LO; index <=
		IPA_v2_V6_MODEM_RT_INDEX_HI; index++) {
		cmd[num_cmds].size = mem.size;
		cmd[num_cmds].system_addr =  mem.phys_base;
		cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
			IPA_v2_RAM_V6_RT_OFST + index*4;

		desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
		desc[num_cmds].pyld = &cmd[num_cmds];
		desc[num_cmds].len = sizeof(*cmd);
		desc[num_cmds].type = IPA_IMM_CMD_DESC;
		num_cmds++;
	}

	retval = ipa_send_cmd(num_cmds, desc);
	if (retval) {
		IPAERR("failed to send immediate command (error %d)\n", retval);
		retval = -EFAULT;
	}

	kfree(cmd);

bail_desc:
	kfree(desc);

bail_dma:
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	return retval;
}

static void ipa_q6_disable_agg_reg(struct ipa_register_write *reg_write,
				   int ep_idx)
{
	reg_write->skip_pipeline_clear = 0;

	reg_write->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(ep_idx);
	reg_write->value =
		(1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
	reg_write->value_mask =
		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;

	reg_write->value |=
		((0 & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) <<
		IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT);
	reg_write->value_mask |=
		((IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK <<
		IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT));
}

/*
 * Redirect the exception path of unconfigured-but-valid pipes to the
 * AP LAN consumer and disable aggregation on all IPA->Q6 pipes, then
 * flush the writes through the IPA TAG process.
 *
 * Ownership: each reg_write payload is freed by ipa_free_buffer() via
 * the descriptor callback on success, or explicitly here if the TAG
 * process fails.
 *
 * Returns 0 on success, -ENOMEM if the descriptor array cannot be
 * allocated, -EINVAL if the TAG process fails. BUG()s on payload
 * allocation failure (mandatory SSR step).
 */
static int ipa_q6_set_ex_path_dis_agg(void)
{
	int ep_idx;
	int client_idx;
	struct ipa_desc *desc;
	int num_descs = 0;
	int index;
	struct ipa_register_write *reg_write;
	int retval;

	desc = kzalloc(sizeof(struct ipa_desc) *
		IPA_Q6_CLEANUP_EXP_AGGR_MAX_CMDS, GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	/* Set the exception path to AP */
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		ep_idx = ipa_get_ep_mapping(client_idx);
		if (ep_idx == -1)
			continue;

		if (ipa_ctx->ep[ep_idx].valid &&
			ipa_ctx->ep[ep_idx].skip_ep_cfg) {
			reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);

			if (!reg_write) {
				IPAERR("failed to allocate memory\n");
				BUG();
			}
			reg_write->skip_pipeline_clear = 0;
			reg_write->offset = IPA_ENDP_STATUS_n_OFST(ep_idx);
			reg_write->value =
				(ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) &
				IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
				IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
			reg_write->value_mask =
				IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
				IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;

			desc[num_descs].opcode = IPA_REGISTER_WRITE;
			desc[num_descs].pyld = reg_write;
			desc[num_descs].len = sizeof(*reg_write);
			desc[num_descs].type = IPA_IMM_CMD_DESC;
			desc[num_descs].callback = ipa_free_buffer;
			desc[num_descs].user1 = reg_write;
			num_descs++;
		}
	}

	/* Disable AGGR on IPA->Q6 pipes */
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
			/*
			 * Fix: skip unmapped clients; previously a -1 from
			 * ipa_get_ep_mapping() was passed straight into
			 * ipa_q6_disable_agg_reg() and used as a register
			 * offset index.
			 */
			ep_idx = ipa_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);

			if (!reg_write) {
				IPAERR("failed to allocate memory\n");
				BUG();
			}

			ipa_q6_disable_agg_reg(reg_write, ep_idx);

			desc[num_descs].opcode = IPA_REGISTER_WRITE;
			desc[num_descs].pyld = reg_write;
			desc[num_descs].len = sizeof(*reg_write);
			desc[num_descs].type = IPA_IMM_CMD_DESC;
			desc[num_descs].callback = ipa_free_buffer;
			desc[num_descs].user1 = reg_write;
			num_descs++;
		}
	}

	/* Will wait 20msecs for IPA tag process completion */
	retval = ipa_tag_process(desc, num_descs,
				 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
	if (retval) {
		IPAERR("TAG process failed! (error %d)\n", retval);
		/* callbacks did not run; free the payloads ourselves */
		for (index = 0; index < num_descs; index++)
			kfree(desc[index].user1);
		retval = -EINVAL;
	}

	kfree(desc);

	return retval;
}

/**
* ipa_q6_cleanup() - A cleanup for all Q6 related configuration
*                    in IPA HW. This is performed in case of SSR.
*
* Runs, in order: pipe delay on Q6 producers, HOLB avoidance on Q6
* consumers, Q6 table cleanup, and exception-path/aggregation
* reconfiguration.
*
* Return codes:
* 0: success
* This is a mandatory procedure, in case one of the steps fails, the
* AP needs to restart.
*/
int ipa_q6_cleanup(void)
{
	int rc;

	rc = ipa_q6_pipe_delay();
	if (rc) {
		IPAERR("Failed to delay Q6 pipes\n");
		BUG();
	}

	rc = ipa_q6_avoid_holb();
	if (rc) {
		IPAERR("Failed to set HOLB on Q6 pipes\n");
		BUG();
	}

	rc = ipa_q6_clean_q6_tables();
	if (rc) {
		IPAERR("Failed to clean Q6 tables\n");
		BUG();
	}

	rc = ipa_q6_set_ex_path_dis_agg();
	if (rc) {
		IPAERR("Failed to disable aggregation on Q6 pipes\n");
		BUG();
	}

	return 0;
}

static int ipa_init_sram(void)
{
	u32 *ipa_sram_mmio;
+16 −1
Original line number Diff line number Diff line
@@ -1640,6 +1640,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
	int pad_len_byte;
	int len;
	unsigned char *buf;
	bool drop_packet;

	IPA_DUMP_BUFF(skb->data, 0, skb->len);

@@ -1705,6 +1706,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,

begin:
	while (skb->len) {
		drop_packet = false;
		IPADBG("LEN_REM %d\n", skb->len);

		if (skb->len < IPA_PKT_STATUS_SIZE) {
@@ -1766,6 +1768,14 @@ begin:
		}
		if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) {
			/* RX data */

			/*
			 * A packet which is received back to the AP after
			 * there was no route match.
			 */
			if (!status->exception && !status->route_match)
				drop_packet = true;

			if (skb->len == IPA_PKT_STATUS_SIZE &&
					!status->exception) {
				WARN_ON(sys->prev_skb != NULL);
@@ -1800,9 +1810,14 @@ begin:
							IPA_PKT_STATUS_SIZE);
					IPADBG("rx avail for %d\n",
							status->endp_dest_idx);
					sys->ep->client_notify(sys->ep->priv,
					if (drop_packet)
						dev_kfree_skb_any(skb2);
					else {
						sys->ep->client_notify(
							sys->ep->priv,
							IPA_RECEIVE,
							(unsigned long)(skb2));
					}
					skb_pull(skb, len +
						IPA_PKT_STATUS_SIZE);
				}
+4 −0
Original line number Diff line number Diff line
@@ -1011,5 +1011,9 @@ int ipa_active_clients_trylock(void);
void ipa_active_clients_unlock(void);
int ipa_wdi_init(void);
int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
int ipa_tag_process(struct ipa_desc *desc, int num_descs,
		    unsigned long timeout);

int ipa_q6_cleanup(void);
int ipa_init_q6_smem(void);
#endif /* _IPA_I_H_ */
+84 −38
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@
#include <linux/qmi_encdec.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <soc/qcom/subsystem_restart.h>

#include "ipa_qmi_service.h"
#include "ipa_ram_mmap.h"
@@ -32,6 +33,7 @@
#define IPA_Q6_SERVICE_SVC_ID 0x31
#define IPA_Q6_SERVICE_INS_ID 2

#define QMI_SEND_REQ_TIMEOUT_MS 10000

static struct qmi_handle *ipa_svc_handle;
static void ipa_a5_svc_recv_msg(struct work_struct *work);
@@ -42,6 +44,7 @@ static struct workqueue_struct *ipa_clnt_resp_workqueue;
static void *curr_conn;
static bool qmi_modem_init_fin, qmi_indication_fin;
static struct work_struct ipa_qmi_service_init_work;
static bool is_load_uc;


/* QMI A5 service */
@@ -296,6 +299,33 @@ static DECLARE_DELAYED_WORK(work_svc_exit, ipa_q6_clnt_svc_exit);
static struct qmi_handle *ipa_q6_clnt;
static int ipa_q6_clnt_reset;

/*
 * Common validation for QMI request results.
 *
 * @rc: return code from qmi_send_req_wait()
 * @req_id: QMI message id of the request, for logging
 * @result/@error: result/error fields from the QMI response
 * @resp_type: human-readable response name, for the debug log
 *
 * Returns rc on transport error, the (non-zero) QMI result on a bad
 * response when SSR is enabled, 0 otherwise. Note the timeout and
 * bad-result branches are only taken when modem SSR is supported
 * (ipa_rmnet_ssr).
 */
static int ipa_check_qmi_response(int rc,
				  int req_id,
				  enum ipa_qmi_result_type_v01 result,
				  enum ipa_qmi_error_type_v01 error,
				  char *resp_type)
{
	if (rc < 0) {
		if (rc == -ETIMEDOUT && ipa_rmnet_ctx.ipa_rmnet_ssr) {
			IPAWANERR(
			"Timeout for qmi request id %d\n", req_id);
			return rc;
		}
		IPAWANERR("Error sending qmi request id %d, rc = %d\n",
			req_id, rc);
		return rc;
	}
	if (result != IPA_QMI_RESULT_SUCCESS_V01 &&
	    ipa_rmnet_ctx.ipa_rmnet_ssr) {
		/*
		 * Fix: arguments now match the format string; previously
		 * req_id was printed as the response and result as the
		 * request id.
		 */
		IPAWANERR(
		"Got bad response %d from request id %d (error %d)\n",
		result, req_id, error);
		return result;
	}
	IPAWANDBG("Received %s successfully\n", resp_type);
	return 0;
}

static int qmi_init_modem_send_sync_msg(void)
{
	struct ipa_init_modem_driver_req_msg_v01 req;
@@ -329,6 +359,13 @@ static int qmi_init_modem_send_sync_msg(void)
	req.ctrl_comm_dest_end_pt_valid = true;
	req.ctrl_comm_dest_end_pt =
		ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
	if (is_load_uc) {  /* First time boot */
		req.is_ssr_bootup_valid = false;
		req.is_ssr_bootup = 0;
	} else {  /* After SSR boot */
		req.is_ssr_bootup_valid = true;
		req.is_ssr_bootup = 1;
	}

	IPAWANDBG("platform_type %d\n", req.platform_type);
	IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
@@ -353,6 +390,8 @@ static int qmi_init_modem_send_sync_msg(void)
			req.modem_mem_info.size);
	IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
			req.ctrl_comm_dest_end_pt);
	IPAWANDBG("is_ssr_bootup %d\n",
			req.is_ssr_bootup);

	req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
	req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
@@ -363,14 +402,11 @@ static int qmi_init_modem_send_sync_msg(void)
	resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;

	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
			       &resp_desc, &resp, sizeof(resp), 0);
	if (rc < 0) {
		IPAWANERR("send req failed %d\n", rc);
		return rc;
	}

	IPAWANDBG("Received ipa_init_modem_driver_resp_msg_v01 response\n");
	return rc;
			&resp_desc, &resp, sizeof(resp),
			QMI_SEND_REQ_TIMEOUT_MS);
	return ipa_check_qmi_response(rc,
		QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
		resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
}

/* sending filter-install-request to modem*/
@@ -401,18 +437,11 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
			req,
			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
			&resp_desc, &resp, sizeof(resp), 0);
	if (rc < 0) {
		IPAWANERR("send req failed %d\n", rc);
		return rc;
	}
	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
		IPAWANERR("got response failed %d\n",
				resp.resp.result);
		return resp.resp.result;
	}
	IPAWANDBG("Received ipa_install_filter response successfully\n");
	return rc;
			&resp_desc, &resp, sizeof(resp),
			QMI_SEND_REQ_TIMEOUT_MS);
	return ipa_check_qmi_response(rc,
		QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result,
		resp.resp.error, "ipa_install_filter");
}


@@ -445,18 +474,11 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
			&req_desc,
			req,
			sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
			&resp_desc, &resp, sizeof(resp), 0);
	if (rc < 0) {
		IPAWANERR("send req failed %d\n", rc);
		return rc;
	}
	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
		IPAWANERR("filter_notify failed %d\n",
			resp.resp.result);
		return resp.resp.result;
	}
	IPAWANDBG("Received ipa_fltr_installed_notif_resp successfully\n");
	return rc;
			&resp_desc, &resp, sizeof(resp),
			QMI_SEND_REQ_TIMEOUT_MS);
	return ipa_check_qmi_response(rc,
		QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
		resp.resp.error, "ipa_fltr_installed_notif_resp");
}

static void ipa_q6_clnt_recv_msg(struct work_struct *work)
@@ -487,6 +509,13 @@ static void ipa_q6_clnt_svc_arrive(struct work_struct *work)
	int rc;
	struct ipa_master_driver_init_complt_ind_msg_v01 ind;

	/*
	 * Setting the current connection to NULL, as due to a race between
	 * server and client clean-up in SSR, the disconnect_cb might not
	 * have necessarily been called
	 */
	curr_conn = NULL;

	/* Create a Local client port for QMI communication */
	ipa_q6_clnt = qmi_handle_create(ipa_q6_clnt_notify, NULL);
	if (!ipa_q6_clnt) {
@@ -621,25 +650,42 @@ static void ipa_qmi_service_init_worker(struct work_struct *work)
	return;
}

int ipa_qmi_service_init(void)
int ipa_qmi_service_init(bool load_uc)
{
	INIT_WORK(&ipa_qmi_service_init_work, ipa_qmi_service_init_worker);
	is_load_uc = load_uc;
	if (!ipa_svc_handle) {
		INIT_WORK(&ipa_qmi_service_init_work,
			ipa_qmi_service_init_worker);
		schedule_work(&ipa_qmi_service_init_work);
	}
	return 0;
}

void ipa_qmi_service_exit(void)
{
	int ret = 0;
	/* qmi-service */
	qmi_svc_unregister(ipa_svc_handle);
	ret = qmi_svc_unregister(ipa_svc_handle);
	if (ret < 0)
		IPAWANERR("Error unregistering qmi service handle %p, ret=%d\n",
		ipa_svc_handle, ret);
	flush_workqueue(ipa_svc_workqueue);
	qmi_handle_destroy(ipa_svc_handle);
	ret = qmi_handle_destroy(ipa_svc_handle);
	if (ret < 0)
		IPAWANERR("Error destroying qmi handle %p, ret=%d\n",
		ipa_svc_handle, ret);
	destroy_workqueue(ipa_svc_workqueue);

	/* qmi-client */
	qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID,
	ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID,
				IPA_Q6_SVC_VERS,
				IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb);
	if (ret < 0)
		IPAWANERR(
		"Error qmi_svc_event_notifier_unregister service %d, ret=%d\n",
		IPA_Q6_SERVICE_SVC_ID, ret);
	destroy_workqueue(ipa_clnt_req_workqueue);
	destroy_workqueue(ipa_clnt_resp_workqueue);

	ipa_svc_handle = 0;
}
Loading