Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d43967ba authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: handle missing interrupts"

parents aa8c2d7a 1f9f7c4d
Loading
Loading
Loading
Loading
+27 −0
Original line number Diff line number Diff line
@@ -3267,6 +3267,33 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
			mode == GSI_CHAN_MODE_CALLBACK) {
		atomic_set(&ctx->poll_mode, mode);
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);

		/*
		 * In GSI 2.2 and 2.5 there is a limitation that can lead
		 * to losing an interrupt. For these versions an
		 * explicit check is needed after enabling the interrupt
		 */
		if (gsi_ctx->per.ver == GSI_VER_2_2 ||
		    gsi_ctx->per.ver == GSI_VER_2_5) {
			u32 src = gsi_readl(gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(
					gsi_ctx->per.ee));
			if (src & (1 << ctx->evtr->id)) {
				__gsi_config_ieob_irq(
					gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
				gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(
							gsi_ctx->per.ee));
				spin_unlock_irqrestore(&gsi_ctx->slock, flags);
				spin_lock_irqsave(&ctx->ring.slock, flags);
				atomic_set(
					&ctx->poll_mode, GSI_CHAN_MODE_POLL);
				spin_unlock_irqrestore(
					&ctx->ring.slock, flags);
				ctx->stats.poll_pending_irq++;
				return -GSI_STATUS_PENDING_IRQ;
			}
		}
		ctx->stats.poll_to_callback++;
	}
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+1 −0
Original line number Diff line number Diff line
@@ -120,6 +120,7 @@ struct gsi_chan_stats {
	unsigned long completed;
	unsigned long callback_to_poll;
	unsigned long poll_to_callback;
	unsigned long poll_pending_irq;
	unsigned long invalid_tre_error;
	unsigned long poll_ok;
	unsigned long poll_empty;
+3 −2
Original line number Diff line number Diff line
@@ -273,9 +273,10 @@ static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx)
	PRT_STAT("queued=%lu compl=%lu\n",
		ctx->stats.queued,
		ctx->stats.completed);
	PRT_STAT("cb->poll=%lu poll->cb=%lu\n",
	PRT_STAT("cb->poll=%lu poll->cb=%lu poll_pend_irq=%lu\n",
		ctx->stats.callback_to_poll,
		ctx->stats.poll_to_callback);
		ctx->stats.poll_to_callback,
		ctx->stats.poll_pending_irq);
	PRT_STAT("invalid_tre_error=%lu\n",
		ctx->stats.invalid_tre_error);
	PRT_STAT("poll_ok=%lu poll_empty=%lu\n",
+34 −29
Original line number Diff line number Diff line
@@ -752,27 +752,24 @@ static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
/**
 * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
{
	int ret;

	if (!atomic_read(&sys->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}
	atomic_set(&sys->curr_polling_state, 0);
	ipa3_dec_release_wakelock();
	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
		GSI_CHAN_MODE_CALLBACK);
	if (ret != GSI_STATUS_SUCCESS) {
		if (ret == -GSI_STATUS_PENDING_IRQ) {
			ipa3_inc_acquire_wakelock();
			atomic_set(&sys->curr_polling_state, 1);
		} else {
			IPAERR("Failed to switch to intr mode.\n");
		goto fail;
		}
	return;
	}

fail:
	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
			msecs_to_jiffies(1));
	return ret;
}

/**
@@ -785,13 +782,16 @@ static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
 */
static void ipa3_handle_rx(struct ipa3_sys_context *sys)
{
	int inactive_cycles = 0;
	int inactive_cycles;
	int cnt;
	int ret;

	if (ipa3_ctx->use_ipa_pm)
		ipa_pm_activate_sync(sys->pm_hdl);
	else
		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
start_poll:
	inactive_cycles = 0;
	do {
		cnt = ipa3_handle_rx_core(sys, true, true);
		if (cnt == 0)
@@ -814,7 +814,10 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
	} while (inactive_cycles <= POLLING_INACTIVITY_RX);

	trace_poll_to_intr3(sys->ep->client);
	ipa3_rx_switch_to_intr_mode(sys);
	ret = ipa3_rx_switch_to_intr_mode(sys);
	if (ret == -GSI_STATUS_PENDING_IRQ)
		goto start_poll;

	if (ipa3_ctx->use_ipa_pm)
		ipa_pm_deferred_deactivate(sys->pm_hdl);
	else
@@ -829,7 +832,7 @@ static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);

	if (sys->ep->napi_enabled) {
	if (sys->napi_obj) {
		/* interrupt mode is done in ipa3_rx_poll context */
		ipa_assert();
	} else
@@ -1004,7 +1007,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
	ep->valid = 1;
	ep->client = sys_in->client;
	ep->client_notify = sys_in->notify;
	ep->napi_enabled = sys_in->napi_enabled;
	ep->sys->napi_obj = sys_in->napi_obj;
	ep->priv = sys_in->priv;
	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
	atomic_set(&ep->avail_fifo_desc,
@@ -1168,7 +1171,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
		return result;
	}

	if (ep->napi_enabled) {
	if (ep->sys->napi_obj) {
		do {
			usleep_range(95, 105);
		} while (atomic_read(&ep->sys->curr_polling_state));
@@ -1523,13 +1526,12 @@ static void ipa3_wq_handle_rx(struct work_struct *work)

	sys = container_of(work, struct ipa3_sys_context, work);

	if (sys->ep->napi_enabled) {
	if (sys->napi_obj) {
		if (!ipa3_ctx->use_ipa_pm)
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
		else
			ipa_pm_activate_sync(sys->pm_hdl);
		sys->ep->client_notify(sys->ep->priv,
				IPA_CLIENT_START_POLL, 0);
		napi_schedule(sys->napi_obj);
	} else
		ipa3_handle_rx(sys);
}
@@ -2988,7 +2990,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
					sys->repl_hdlr =
					   ipa3_replenish_rx_cache;
				}
				if (in->napi_enabled && in->recycle_enabled)
				if (in->napi_obj && in->recycle_enabled)
					sys->repl_hdlr =
					 ipa3_replenish_rx_cache_recycle;
				in->ipa_ep_cfg.aggr.aggr_sw_eof_active
@@ -3556,24 +3558,22 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
	 */
	if (ipa3_ctx->use_ipa_pm) {
		clk_off = ipa_pm_activate(sys->pm_hdl);
		if (!clk_off && sys->ep->napi_enabled) {
			sys->ep->client_notify(sys->ep->priv,
				IPA_CLIENT_START_POLL, 0);
		if (!clk_off && sys->napi_obj) {
			napi_schedule(sys->napi_obj);
			return;
		}
		queue_work(sys->wq, &sys->work);
		return;
	}

	if (sys->ep->napi_enabled) {
	if (sys->napi_obj) {
		struct ipa_active_client_logging_info log;

		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
		clk_off = ipa3_inc_client_enable_clks_no_block(
			&log);
		if (!clk_off) {
			sys->ep->client_notify(sys->ep->priv,
				IPA_CLIENT_START_POLL, 0);
			napi_schedule(sys->napi_obj);
			return;
		}
	}
@@ -3775,7 +3775,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
		ep->gsi_mem_info.evt_ring_base_vaddr =
			gsi_evt_ring_props.ring_base_vaddr;

		if (ep->napi_enabled) {
		if (ep->sys->napi_obj) {
			gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
			gsi_evt_ring_props.int_modc = IPA_GSI_EVT_RING_INT_MODC;
		} else {
@@ -4062,6 +4062,7 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
	}

	ep = &ipa3_ctx->ep[clnt_hdl];
start_poll:
	while (remain_aggr_weight > 0 &&
			atomic_read(&ep->sys->curr_polling_state)) {
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
@@ -4088,8 +4089,12 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
	}
	cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT;
	if (cnt < weight) {
		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
		ipa3_rx_switch_to_intr_mode(ep->sys);
		napi_complete(ep->sys->napi_obj);
		ret = ipa3_rx_switch_to_intr_mode(ep->sys);
		if (ret == -GSI_STATUS_PENDING_IRQ &&
				napi_reschedule(ep->sys->napi_obj))
			goto start_poll;

		if (ipa3_ctx->use_ipa_pm)
			ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
		else
+1 −2
Original line number Diff line number Diff line
@@ -761,7 +761,6 @@ struct ipa3_status_stats {
 * @disconnect_in_progress: Indicates client disconnect in progress.
 * @qmi_request_sent: Indicates whether QMI request to enable clear data path
 *					request is sent or not.
 * @napi_enabled: when true, IPA call client callback to start polling
 * @client_lock_unlock: callback function to take mutex lock/unlock for USB
 *				clients
 */
@@ -793,7 +792,6 @@ struct ipa3_ep_context {
	u32 gsi_offload_state;
	bool disconnect_in_progress;
	u32 qmi_request_sent;
	bool napi_enabled;
	u32 eot_in_poll_err;
	bool ep_delay_set;

@@ -882,6 +880,7 @@ struct ipa3_sys_context {
	void (*repl_hdlr)(struct ipa3_sys_context *sys);
	struct ipa3_repl_ctx repl;
	u32 pkt_sent;
	struct napi_struct *napi_obj;

	/* ordering is important - mutable fields go above */
	struct ipa3_ep_context *ep;
Loading