Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6564d8dc authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa3: Fix to processing of GSI IRQ when IPA is clock gated"

parents f92a437d 6701e893
Loading
Loading
Loading
Loading
+61 −1
Original line number Original line Diff line number Diff line
@@ -1736,7 +1736,7 @@ int gsi_stop_channel(unsigned long chan_hdl)
	res = wait_for_completion_timeout(&ctx->compl,
	res = wait_for_completion_timeout(&ctx->compl,
			msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
			msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		GSIDBG("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
		goto free_lock;
	}
	}
@@ -2056,6 +2056,66 @@ int gsi_query_channel_info(unsigned long chan_hdl,
}
}
EXPORT_SYMBOL(gsi_query_channel_info);
EXPORT_SYMBOL(gsi_query_channel_info);


/*
 * gsi_is_channel_empty - report whether a GPI channel has no pending work.
 *
 * @chan_hdl:  channel handle previously obtained from gsi_alloc_channel
 * @is_empty:  set to true when the channel is empty:
 *             FROM_GSI direction: SW has processed everything up to the
 *             hardware read pointer (rp_local == rp);
 *             TO_GSI direction: hardware has consumed every queued
 *             descriptor (wp == rp).
 *
 * Reads CNTXT_4..7 registers under the ring spinlock (event-ring lock when
 * the channel has an associated event ring) and caches rp/wp in the ring ctx.
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	struct gsi_chan_ctx *ctx;
	spinlock_t *slock;
	unsigned long flags;
	uint64_t rp;
	uint64_t wp;
	int ee;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	/*
	 * Read the EE index only after gsi_ctx has been validated; the
	 * original initializer dereferenced gsi_ctx before the NULL check.
	 */
	ee = gsi_ctx->per.ee;

	if (chan_hdl >= GSI_MAX_CHAN || !is_empty) {
		GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
				chan_hdl, is_empty);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/* Emptiness is only well-defined for GPI protocol channels. */
	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	/* rp is split across CNTXT_4 (low 32 bits) and CNTXT_5 (high). */
	rp = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
	ctx->ring.rp = rp;

	/* wp is split across CNTXT_6 (low 32 bits) and CNTXT_7 (high). */
	wp = gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
	ctx->ring.wp = wp;

	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		*is_empty = (ctx->ring.rp_local == rp) ? true : false;
	else
		*is_empty = (wp == rp) ? true : false;

	spin_unlock_irqrestore(slock, flags);

	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
			chan_hdl, rp, wp, ctx->ring.rp_local);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_is_channel_empty);

int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
		struct gsi_xfer_elem *xfer, bool ring_db)
{
{
+4 −2
Original line number Original line Diff line number Diff line
@@ -3032,6 +3032,8 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
					&ipa3_ctx->transport_pm.dec_clients)
					&ipa3_ctx->transport_pm.dec_clients)
					) {
					) {
					ipa3_inc_client_enable_clks();
					ipa3_inc_client_enable_clks();
					IPADBG("Pipes un-suspended.\n");
					IPADBG("Enter poll mode.\n");
					atomic_set(
					atomic_set(
					&ipa3_ctx->transport_pm.dec_clients,
					&ipa3_ctx->transport_pm.dec_clients,
					1);
					1);
@@ -3076,7 +3078,7 @@ int ipa3_restore_suspend_handler(void)
	}
	}


	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
			ipa3_suspend_handler, true, NULL);
			ipa3_suspend_handler, false, NULL);
	if (result) {
	if (result) {
		IPAERR("register handler for suspend interrupt failed\n");
		IPAERR("register handler for suspend interrupt failed\n");
		result = -EPERM;
		result = -EPERM;
@@ -3157,7 +3159,7 @@ int ipa3_init_interrupts(void)


	/*add handler for suspend interrupt*/
	/*add handler for suspend interrupt*/
	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
			ipa3_suspend_handler, true, NULL);
			ipa3_suspend_handler, false, NULL);
	if (result) {
	if (result) {
		IPAERR("register handler for suspend interrupt failed\n");
		IPAERR("register handler for suspend interrupt failed\n");
		result = -ENODEV;
		result = -ENODEV;
+20 −9
Original line number Original line Diff line number Diff line
@@ -40,7 +40,7 @@ static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
						ipa3_enable_tx_suspend_wa);
						ipa3_enable_tx_suspend_wa);
static spinlock_t suspend_wa_lock;
static spinlock_t suspend_wa_lock;
static void ipa3_process_interrupts(void);
static void ipa3_process_interrupts(bool isr_context);


static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
	[IPA_UC_TX_CMD_Q_NOT_FULL_IRQ]		= -1,
	[IPA_UC_TX_CMD_Q_NOT_FULL_IRQ]		= -1,
@@ -93,7 +93,7 @@ static bool ipa3_is_valid_ep(u32 ep_suspend_data)
	return false;
	return false;
}
}


static int ipa3_handle_interrupt(int irq_num)
static int ipa3_handle_interrupt(int irq_num, bool isr_context)
{
{
	struct ipa3_interrupt_info interrupt_info;
	struct ipa3_interrupt_info interrupt_info;
	struct ipa3_interrupt_work_wrap *work_data;
	struct ipa3_interrupt_work_wrap *work_data;
@@ -131,7 +131,8 @@ static int ipa3_handle_interrupt(int irq_num)
		break;
		break;
	}
	}


	if (interrupt_info.deferred_flag) {
	/* Force defer processing if in ISR context. */
	if (interrupt_info.deferred_flag || isr_context) {
		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
				GFP_ATOMIC);
				GFP_ATOMIC);
		if (!work_data) {
		if (!work_data) {
@@ -183,7 +184,7 @@ static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
		, en);
		, en);
	ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
	ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
		IPA_IRQ_EN_EE_n_ADDR(ipa_ee), en);
		IPA_IRQ_EN_EE_n_ADDR(ipa_ee), en);
	ipa3_process_interrupts();
	ipa3_process_interrupts(false);
	ipa3_dec_client_disable_clks();
	ipa3_dec_client_disable_clks();


	IPADBG("Exit\n");
	IPADBG("Exit\n");
@@ -215,7 +216,7 @@ static void ipa3_tx_suspend_interrupt_wa(void)
	IPADBG("Exit\n");
	IPADBG("Exit\n");
}
}


static void ipa3_process_interrupts(void)
static void ipa3_process_interrupts(bool isr_context)
{
{
	u32 reg;
	u32 reg;
	u32 bmsk;
	u32 bmsk;
@@ -231,8 +232,18 @@ static void ipa3_process_interrupts(void)
	while (en & reg) {
	while (en & reg) {
		bmsk = 1;
		bmsk = 1;
		for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
		for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
			if (en & reg & bmsk)
			if (en & reg & bmsk) {
				ipa3_handle_interrupt(i);
				/*
				 * handle the interrupt with spin_lock
				 * unlocked to avoid calling client in atomic
				 * context. mutual exclusion still preserved
				 * as the read/clr is done with spin_lock
				 * locked.
				 */
				spin_unlock_irqrestore(&suspend_wa_lock, flags);
				ipa3_handle_interrupt(i, isr_context);
				spin_lock_irqsave(&suspend_wa_lock, flags);
			}
			bmsk = bmsk << 1;
			bmsk = bmsk << 1;
		}
		}
		ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
		ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
@@ -253,7 +264,7 @@ static void ipa3_interrupt_defer(struct work_struct *work)
{
{
	IPADBG("processing interrupts in wq\n");
	IPADBG("processing interrupts in wq\n");
	ipa3_inc_client_enable_clks();
	ipa3_inc_client_enable_clks();
	ipa3_process_interrupts();
	ipa3_process_interrupts(false);
	ipa3_dec_client_disable_clks();
	ipa3_dec_client_disable_clks();
	IPADBG("Done\n");
	IPADBG("Done\n");
}
}
@@ -276,7 +287,7 @@ static irqreturn_t ipa3_isr(int irq, void *ctxt)
		goto bail;
		goto bail;
	}
	}


	ipa3_process_interrupts();
	ipa3_process_interrupts(true);
	IPADBG("Exit\n");
	IPADBG("Exit\n");


bail:
bail:
+45 −3
Original line number Original line Diff line number Diff line
@@ -5061,21 +5061,63 @@ void ipa3_set_resorce_groups_min_max_limits(void)
	IPADBG("EXIT\n");
	IPADBG("EXIT\n");
}
}


static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
{
	bool empty;

	IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
	gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
	gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
	if (!empty) {
		IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
		/* queue a work to start polling if don't have one */
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		if (!atomic_read(&ep->sys->curr_polling_state)) {
			atomic_set(&ep->sys->curr_polling_state, 1);
			queue_work(ep->sys->wq, &ep->sys->work);
		}
	}
}

void ipa3_suspend_apps_pipes(bool suspend)
/*
 * ipa3_suspend_apps_pipes - suspend or unsuspend the APPS LAN and WAN
 * consumer pipes.
 *
 * @suspend: true to suspend the pipes, false to unsuspend them
 *
 * On suspend, each valid pipe's GSI channel is moved to poll mode (and the
 * polling worker is kicked if the channel is not empty); on unsuspend, the
 * channel is returned to callback mode unless polling is already active.
 */
void ipa3_suspend_apps_pipes(bool suspend)
{
	struct ipa_ep_cfg_ctrl cfg;
	int ipa_ep_idx;
	struct ipa3_ep_context *ep;

	memset(&cfg, 0, sizeof(cfg));
	cfg.ipa_ep_suspend = suspend;

	ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
	/*
	 * Guard against an unmapped client (e.g. during SSR); indexing
	 * ep[] with -1 would read out of bounds. The WAN path below already
	 * had this check — apply it to LAN as well, but keep going so the
	 * WAN pipe is still handled.
	 */
	if (ipa_ep_idx == -1) {
		IPADBG("Invalid client.\n");
	} else {
		ep = &ipa3_ctx->ep[ipa_ep_idx];
		if (ep->valid) {
			IPADBG("%s pipe %d\n",
				suspend ? "suspend" : "unsuspend",
				ipa_ep_idx);
			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
			if (suspend)
				ipa3_gsi_poll_after_suspend(ep);
			else if (!atomic_read(&ep->sys->curr_polling_state))
				gsi_config_channel_mode(ep->gsi_chan_hdl,
					GSI_CHAN_MODE_CALLBACK);
		}
	}

	ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
	/* Considering the case for SSR. */
	if (ipa_ep_idx == -1) {
		IPADBG("Invalid client.\n");
		return;
	}
	ep = &ipa3_ctx->ep[ipa_ep_idx];
	if (ep->valid) {
		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
			ipa_ep_idx);
		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
		if (suspend)
			ipa3_gsi_poll_after_suspend(ep);
		else if (!atomic_read(&ep->sys->curr_polling_state))
			gsi_config_channel_mode(ep->gsi_chan_hdl,
				GSI_CHAN_MODE_CALLBACK);
	}
}


/**
/**
+18 −0
Original line number Original line Diff line number Diff line
@@ -861,6 +861,19 @@ int gsi_query_channel_db_addr(unsigned long chan_hdl,
int gsi_query_channel_info(unsigned long chan_hdl,
int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info);
		struct gsi_chan_info *info);


/**
 * gsi_is_channel_empty - Peripheral can call this function to query if
 * the channel is empty. This is only applicable to GPI. "Empty" means
 * GSI has consumed all descriptors for a TO_GSI channel and SW has
 * processed all completed descriptors for a FROM_GSI channel.
 *
 * @chan_hdl:  Client handle previously obtained from gsi_alloc_channel
 * @is_empty:  set by GSI based on channel emptiness
 *
 * @Return gsi_status
 */
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);

/**
/**
 * gsi_get_channel_cfg - This function returns the current config
 * gsi_get_channel_cfg - This function returns the current config
 * of the specified channel
 * of the specified channel
@@ -1098,6 +1111,11 @@ static inline int gsi_query_channel_info(unsigned long chan_hdl,
	return -GSI_STATUS_UNSUPPORTED_OP;
	return -GSI_STATUS_UNSUPPORTED_OP;
}
}


/* Stub used when GSI support is not compiled in: always unsupported. */
static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_poll_channel(unsigned long chan_hdl,
static inline int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
		struct gsi_chan_xfer_notify *notify)
{
{