Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 347c552b authored by Skylar Chang
Browse files

msm: ipa: suspend apps endpoint on clock gating



Before entering clock gating, suspend all IPA apps pipes.
If a packet arrives for the apps processor while it has voted for IPA
clock gating, a suspend interrupt will be fired and the IPA clocks will
be voted for again.

Change-Id: I1fb22ede33c09032ec63106c9d04de3b08dff5b2
Acked-by: Ady Abraham <adya@qti.qualcomm.com>
Signed-off-by: Skylar Chang <chiaweic@codeaurora.org>
parent c8df36dc
Loading
Loading
Loading
Loading
+42 −126
Original line number Diff line number Diff line
@@ -168,9 +168,6 @@ struct ipa_ioc_nat_alloc_mem32 {
static void ipa_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa_tag_work, ipa_start_tag_process);

static void ipa_sps_process_irq(struct work_struct *work);
static DECLARE_WORK(ipa_sps_process_irq_work, ipa_sps_process_irq);

static void ipa_sps_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa_sps_release_resource_work,
	ipa_sps_release_resource);
@@ -2479,6 +2476,7 @@ void _ipa_enable_clks_v2_0(void)

	if (smmu_clk)
		clk_prepare_enable(smmu_clk);
	ipa_suspend_apps_pipes(false);
}

void _ipa_enable_clks_v1_1(void)
@@ -2600,6 +2598,7 @@ void _ipa_disable_clks_v1_1(void)
void _ipa_disable_clks_v2_0(void)
{
	IPADBG("disabling gcc_ipa_clk\n");
	ipa_suspend_apps_pipes(true);
	ipa_uc_notify_clk_state(false);
	if (ipa_clk)
		clk_disable_unprepare(ipa_clk);
@@ -2915,6 +2914,13 @@ static int ipa_init_flt_block(void)
	return result;
}

/* Queue the delayed work that drops the IPA clock vote once the SPS
 * producer inactivity timeout elapses.
 */
static void ipa_sps_process_irq_schedule_rel(void)
{
	unsigned long delay = msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC);

	queue_delayed_work(ipa_ctx->sps_power_mgmt_wq,
		&ipa_sps_release_resource_work, delay);
}

/**
* ipa_suspend_handler() - Handles the suspend interrupt:
* wakes up the suspended peripheral by requesting its consumer
@@ -2940,52 +2946,34 @@ void ipa_suspend_handler(enum ipa_irq_type interrupt,

	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
		if ((suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) {
			if (IPA_CLIENT_IS_APPS_CONS(ipa_ctx->ep[i].client)) {
				/*
				 * pipe will be unsuspended as part of
				 * enabling IPA clocks
				 */
				ipa_inc_client_enable_clks();
				ipa_ctx->sps_pm.dec_clients = true;
				ipa_sps_process_irq_schedule_rel();
			} else {
				resource = ipa2_get_rm_resource_from_ep(i);
			res = ipa_rm_request_resource_with_timer(resource);
				res = ipa_rm_request_resource_with_timer(
					resource);
				if (res == EPERM &&
				IPA_CLIENT_IS_CONS(ipa_ctx->ep[i].client)) {
				    IPA_CLIENT_IS_CONS(
					ipa_ctx->ep[i].client)) {
					holb_cfg.en = 1;
					res = ipa2_cfg_ep_holb_by_client(
					   ipa_ctx->ep[i].client, &holb_cfg);
					if (res) {
					IPAERR("holb en fail,IPAHW stall\n");
						IPAERR("holb en fail\n");
						IPAERR("IPAHW stall\n");
						BUG();
					}
				}
			}
		bmsk = bmsk << 1;
	}
		}

static void ipa_sps_process_irq_schedule_rel(void)
{
	ipa_ctx->sps_pm.res_rel_in_prog = true;
	queue_delayed_work(ipa_ctx->sps_power_mgmt_wq,
			   &ipa_sps_release_resource_work,
			   msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC));
			bmsk = bmsk << 1;
	}

/*
 * ipa_sps_process_irq() - workqueue handler that services a BAM irq
 * whose processing was deferred because IPA clocks could not be voted
 * for synchronously from sps_event_cb().
 *
 * Votes for IPA clocks (may block), marks the SPS resource as granted
 * under sps_pm.lock, asks the SPS driver to process the pending BAM
 * irq, and finally schedules the delayed release of the clock vote.
 *
 * NOTE(review): sps_bam_process_irq() is invoked while sps_pm.lock is
 * held with local interrupts disabled — assumes the SPS driver is safe
 * to call in atomic context here; confirm against the sps driver.
 */
static void ipa_sps_process_irq(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	/* request IPA clocks */
	ipa_inc_client_enable_clks();

	/* mark SPS resource as granted */
	spin_lock_irqsave(&ipa_ctx->sps_pm.lock, flags);
	ipa_ctx->sps_pm.res_granted = true;
	IPADBG("IPA is ON, calling sps driver\n");

	/* process bam irq */
	ret = sps_bam_process_irq(ipa_ctx->bam_handle);
	if (ret)
		IPAERR("sps_process_eot_event failed %d\n", ret);

	/* release IPA clocks */
	ipa_sps_process_irq_schedule_rel();
	spin_unlock_irqrestore(&ipa_ctx->sps_pm.lock, flags);
}

static int apps_cons_release_resource(void)
@@ -3000,20 +2988,17 @@ static int apps_cons_request_resource(void)

static void ipa_sps_release_resource(struct work_struct *work)
{
	unsigned long flags;
	bool dec_clients = false;

	spin_lock_irqsave(&ipa_ctx->sps_pm.lock, flags);
	/* check whether still need to decrease client usage */
	if (ipa_ctx->sps_pm.res_rel_in_prog) {
		dec_clients = true;
		ipa_ctx->sps_pm.res_rel_in_prog = false;
		ipa_ctx->sps_pm.res_granted = false;
	}
	spin_unlock_irqrestore(&ipa_ctx->sps_pm.lock, flags);
	if (dec_clients)
	if (ipa_ctx->sps_pm.dec_clients) {
		if (atomic_read(&ipa_ctx->sps_pm.eot_activity)) {
			ipa_sps_process_irq_schedule_rel();
		} else {
			ipa_ctx->sps_pm.dec_clients = false;
			ipa_dec_client_disable_clks();
		}
	}
	atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
}

int ipa_create_apps_resource(void)
{
@@ -3038,62 +3023,7 @@ int ipa_create_apps_resource(void)
	return result;
}

/**
 * sps_event_cb() - Handles SPS events
 * @event: event to handle
 * @param: event-specific paramer
 *
 * This callback support the following events:
 *	- SPS_CALLBACK_BAM_RES_REQ: request resource
 *		Try to increase IPA active client counter.
 *		In case this can be done synchronously then
 *		return in *param true. Otherwise return false in *param
 *		and request IPA clocks. Later call to
 *		sps_bam_process_irq to process the pending irq.
 *	- SPS_CALLBACK_BAM_RES_REL: release resource
 *		schedule a delayed work for decreasing IPA active client
 *		counter. In case that during this time another request arrives,
 *		this work will be canceled.
 */
static void sps_event_cb(enum sps_callback_case event, void *param)
{
	unsigned long flags;

	/* serialize against the deferred irq work and the delayed
	 * resource-release work, which share sps_pm state
	 */
	spin_lock_irqsave(&ipa_ctx->sps_pm.lock, flags);

	switch (event) {
	case SPS_CALLBACK_BAM_RES_REQ:
	{
		bool *ready = (bool *)param;

		/* make sure no release will happen */
		cancel_delayed_work(&ipa_sps_release_resource_work);
		ipa_ctx->sps_pm.res_rel_in_prog = false;

		if (ipa_ctx->sps_pm.res_granted) {
			*ready = true;
		} else {
			/* try a non-blocking clock vote; if it cannot
			 * be granted synchronously, report not-ready
			 * and defer the BAM irq processing to the
			 * power-management workqueue
			 */
			if (ipa_inc_client_enable_clks_no_block() == 0) {
				ipa_ctx->sps_pm.res_granted = true;
				*ready = true;
			} else {
				queue_work(ipa_ctx->sps_power_mgmt_wq,
					   &ipa_sps_process_irq_work);
				*ready = false;
			}
		}
		break;
	}

	case SPS_CALLBACK_BAM_RES_REL:
		/* drop the clock vote after the inactivity timeout */
		ipa_sps_process_irq_schedule_rel();
		break;
	default:
		IPADBG("unsupported event %d\n", event);
	}

	spin_unlock_irqrestore(&ipa_ctx->sps_pm.lock, flags);
}
/**
* ipa_init() - Initialize the IPA Driver
* @resource_p:	contain platform specific values from DST file
@@ -3272,10 +3202,6 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
		goto fail_create_sps_wq;
	}

	spin_lock_init(&ipa_ctx->sps_pm.lock);
	ipa_ctx->sps_pm.res_granted = false;
	ipa_ctx->sps_pm.res_rel_in_prog = false;

	/* register IPA with SPS driver */
	bam_props.phys_addr = resource_p->bam_mem_base;
	bam_props.virt_size = resource_p->bam_mem_size;
@@ -3286,13 +3212,11 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
	bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
	if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
		bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
	bam_props.options |= SPS_BAM_RES_CONFIRM;
	if (ipa_ctx->ipa_bam_remote_mode == true)
		bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
	if (ipa_ctx->smmu_present)
		bam_props.options |= SPS_BAM_SMMU_EN;
	bam_props.ee = resource_p->ee;
	bam_props.callback = sps_event_cb;
	bam_props.ipc_loglevel = 2;

	result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
@@ -4060,15 +3984,6 @@ int ipa2_ap_suspend(struct device *dev)
	int i;

	IPADBG("Enter...\n");
	/*
	 * In case SPS requested IPA resources fail to suspend.
	 * This can happen if SPS driver is during the processing of
	 * IPA BAM interrupt
	 */
	if (ipa_ctx->sps_pm.res_granted && !ipa_ctx->sps_pm.res_rel_in_prog) {
		IPAERR("SPS resource is granted, do not suspend\n");
		return -EAGAIN;
	}

	/* In case there is a tx/rx handler in polling mode fail to suspend */
	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
@@ -4081,6 +3996,7 @@ int ipa2_ap_suspend(struct device *dev)
	}

	/* release SPS IPA resource without waiting for inactivity timer */
	atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
	ipa_sps_release_resource(NULL);
	IPADBG("Exit\n");

+8 −0
Original line number Diff line number Diff line
@@ -653,6 +653,8 @@ static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
		if (!atomic_read(&sys->curr_polling_state)) {
			ret = sps_get_config(sys->ep->ep_hdl,
					&sys->ep->connect);
@@ -695,6 +697,8 @@ static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		tx_pkt = notify->data.transfer.user;
		if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
		break;
	default:
@@ -810,6 +814,8 @@ static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
		if (!atomic_read(&sys->curr_polling_state)) {
			ret = sps_get_config(sys->ep->ep_hdl,
					&sys->ep->connect);
@@ -2530,6 +2536,8 @@ void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		rx_pkt = notify->data.transfer.user;
		if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
		rx_pkt->len = notify->data.transfer.iovec.size;
		IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
				notify->user, rx_pkt->len);
+6 −7
Original line number Diff line number Diff line
@@ -1050,14 +1050,13 @@ struct ipa_uc_wdi_ctx {

/**
 * struct ipa_sps_pm - SPS power management related members
 * @lock: lock for ensuring atomic operations
 * @res_granted: true if SPS requested IPA resource and IPA granted it
 * @res_rel_in_prog: true if releasing IPA resource is in progress
 * @dec_clients: true if need to decrease active clients count
 * @eot_activity: represent EOT interrupt activity to determine to reset
 *  the inactivity timer
 */
struct ipa_sps_pm {
	spinlock_t lock;	/* protects the fields below */
	bool res_granted;	/* SPS requested the IPA resource and got it */
	bool res_rel_in_prog;	/* delayed resource release is pending */
	bool dec_clients;	/* active-clients count still to be decreased */
	atomic_t eot_activity;	/* EOT irq seen; re-arms inactivity timer */
};

/**
@@ -1936,7 +1935,7 @@ int ipa2_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
struct device *ipa2_get_dma_dev(void);
int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);

void ipa_suspend_apps_pipes(bool suspend);



+17 −0
Original line number Diff line number Diff line
@@ -5010,3 +5010,20 @@ u32 ipa_get_sys_yellow_wm(void)
		return 0;
}
EXPORT_SYMBOL(ipa_get_sys_yellow_wm);

/**
 * ipa_suspend_apps_pipes() - suspend or unsuspend the IPA apps consumer pipes
 * @suspend: true to suspend the APPS LAN/WAN consumer endpoints,
 *	     false to resume them
 *
 * Called around IPA clock gating: the pipes are suspended before the
 * clocks are turned off so that an incoming packet raises a suspend
 * interrupt, which re-votes the IPA clocks.
 */
void ipa_suspend_apps_pipes(bool suspend)
{
	struct ipa_ep_cfg_ctrl cfg;
	int ipa_ep_idx;

	memset(&cfg, 0, sizeof(cfg));
	cfg.ipa_ep_suspend = suspend;

	/*
	 * ipa_get_ep_mapping() returns a negative value for a client
	 * with no endpoint on this HW; guard before indexing ep[] to
	 * avoid an out-of-bounds access.
	 */
	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
	if (ipa_ep_idx >= 0 && ipa_ctx->ep[ipa_ep_idx].valid)
		ipa_cfg_ep_ctrl(ipa_ep_idx, &cfg);

	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
	if (ipa_ep_idx >= 0 && ipa_ctx->ep[ipa_ep_idx].valid)
		ipa_cfg_ep_ctrl(ipa_ep_idx, &cfg);
}
+30 −119
Original line number Diff line number Diff line
@@ -163,9 +163,6 @@ struct ipa3_ioc_nat_alloc_mem32 {
static void ipa3_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);

static void ipa3_sps_process_irq(struct work_struct *work);
static DECLARE_WORK(ipa3_sps_process_irq_work, ipa3_sps_process_irq);

static void ipa3_sps_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_sps_release_resource_work,
	ipa3_sps_release_resource);
@@ -2609,6 +2606,7 @@ void _ipa_enable_clks_v3_0(void)

	if (smmu_clk)
		clk_prepare_enable(smmu_clk);
	ipa3_suspend_apps_pipes(false);
}

static unsigned int ipa3_get_bus_vote(void)
@@ -2660,6 +2658,7 @@ void ipa3_enable_clks(void)
void _ipa_disable_clks_v3_0(void)
{
	IPADBG("disabling gcc_ipa_clk\n");
	ipa3_suspend_apps_pipes(true);
	ipa3_uc_notify_clk_state(false);
	if (ipa3_clk)
		clk_disable_unprepare(ipa3_clk);
@@ -2861,6 +2860,13 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
	return 0;
}

/* Queue the delayed work that drops the IPA clock vote once the SPS
 * producer inactivity timeout elapses.
 */
static void ipa3_sps_process_irq_schedule_rel(void)
{
	unsigned long delay = msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC);

	queue_delayed_work(ipa3_ctx->sps_power_mgmt_wq,
		&ipa3_sps_release_resource_work, delay);
}

/**
* ipa3_suspend_handler() - Handles the suspend interrupt:
* wakes up the suspended peripheral by requesting its consumer
@@ -2881,42 +2887,21 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
	IPADBG("interrupt=%d, interrupt_data=%u\n", interrupt, suspend_data);
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
				/*
				 * pipe will be unsuspended as part of
				 * enabling IPA clocks
				 */
				ipa3_inc_client_enable_clks();
				ipa3_ctx->sps_pm.dec_clients = true;
				ipa3_sps_process_irq_schedule_rel();
			} else {
				resource = ipa3_get_rm_resource_from_ep(i);
				ipa3_rm_request_resource_with_timer(resource);
			}
		bmsk = bmsk << 1;
	}
		}

static void ipa3_sps_process_irq_schedule_rel(void)
{
	ipa3_ctx->sps_pm.res_rel_in_prog = true;
	queue_delayed_work(ipa3_ctx->sps_power_mgmt_wq,
			   &ipa3_sps_release_resource_work,
			   msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC));
		bmsk = bmsk << 1;
	}

/*
 * ipa3_sps_process_irq() - workqueue handler that services a BAM irq
 * whose processing was deferred because IPA clocks could not be voted
 * for synchronously from ipa3_sps_event_cb().
 *
 * Votes for IPA clocks (may block), marks the SPS resource as granted
 * under sps_pm.lock, asks the SPS driver to process the pending BAM
 * irq, and finally schedules the delayed release of the clock vote.
 *
 * NOTE(review): sps_bam_process_irq() is invoked while sps_pm.lock is
 * held with local interrupts disabled — assumes the SPS driver is safe
 * to call in atomic context here; confirm against the sps driver.
 */
static void ipa3_sps_process_irq(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	/* request IPA clocks */
	ipa3_inc_client_enable_clks();

	/* mark SPS resource as granted */
	spin_lock_irqsave(&ipa3_ctx->sps_pm.lock, flags);
	ipa3_ctx->sps_pm.res_granted = true;
	IPADBG("IPA is ON, calling sps driver\n");

	/* process bam irq */
	ret = sps_bam_process_irq(ipa3_ctx->bam_handle);
	if (ret)
		IPAERR("sps_process_eot_event failed %d\n", ret);

	/* release IPA clocks */
	ipa3_sps_process_irq_schedule_rel();
	spin_unlock_irqrestore(&ipa3_ctx->sps_pm.lock, flags);
}

static int ipa3_apps_cons_release_resource(void)
@@ -2931,20 +2916,17 @@ static int ipa3_apps_cons_request_resource(void)

static void ipa3_sps_release_resource(struct work_struct *work)
{
	unsigned long flags;
	bool dec_clients = false;

	spin_lock_irqsave(&ipa3_ctx->sps_pm.lock, flags);
	/* check whether still need to decrease client usage */
	if (ipa3_ctx->sps_pm.res_rel_in_prog) {
		dec_clients = true;
		ipa3_ctx->sps_pm.res_rel_in_prog = false;
		ipa3_ctx->sps_pm.res_granted = false;
	}
	spin_unlock_irqrestore(&ipa3_ctx->sps_pm.lock, flags);
	if (dec_clients)
	if (ipa3_ctx->sps_pm.dec_clients) {
		if (atomic_read(&ipa3_ctx->sps_pm.eot_activity)) {
			ipa3_sps_process_irq_schedule_rel();
		} else {
			ipa3_ctx->sps_pm.dec_clients = false;
			ipa3_dec_client_disable_clks();
		}
	}
	atomic_set(&ipa3_ctx->sps_pm.eot_activity, 0);
}

int ipa3_create_apps_resource(void)
{
@@ -2971,63 +2953,6 @@ int ipa3_create_apps_resource(void)
	return result;
}

/**
 * ipa3_sps_event_cb() - Handles SPS events
 * @event: event to handle
 * @param: event-specific paramer
 *
 * This callback support the following events:
 *	- SPS_CALLBACK_BAM_RES_REQ: request resource
 *		Try to increase IPA active client counter.
 *		In case this can be done synchronously then
 *		return in *param true. Otherwise return false in *param
 *		and request IPA clocks. Later call to
 *		sps_bam_process_irq to process the pending irq.
 *	- SPS_CALLBACK_BAM_RES_REL: release resource
 *		schedule a delayed work for decreasing IPA active client
 *		counter. In case that during this time another request arrives,
 *		this work will be canceled.
 */
static void ipa3_sps_event_cb(enum sps_callback_case event, void *param)
{
	unsigned long flags;

	/* serialize against the deferred irq work and the delayed
	 * resource-release work, which share sps_pm state
	 */
	spin_lock_irqsave(&ipa3_ctx->sps_pm.lock, flags);

	switch (event) {
	case SPS_CALLBACK_BAM_RES_REQ:
	{
		bool *ready = (bool *)param;

		/* make sure no release will happen */
		cancel_delayed_work(&ipa3_sps_release_resource_work);
		ipa3_ctx->sps_pm.res_rel_in_prog = false;

		if (ipa3_ctx->sps_pm.res_granted) {
			*ready = true;
		} else {
			/* try a non-blocking clock vote; if it cannot
			 * be granted synchronously, report not-ready
			 * and defer the BAM irq processing to the
			 * power-management workqueue
			 */
			if (ipa3_inc_client_enable_clks_no_block() == 0) {
				ipa3_ctx->sps_pm.res_granted = true;
				*ready = true;
			} else {
				queue_work(ipa3_ctx->sps_power_mgmt_wq,
					   &ipa3_sps_process_irq_work);
				*ready = false;
			}
		}
		break;
	}

	case SPS_CALLBACK_BAM_RES_REL:
		/* drop the clock vote after the inactivity timeout */
		ipa3_sps_process_irq_schedule_rel();
		break;
	default:
		IPADBG("unsupported event %d\n", event);
	}

	spin_unlock_irqrestore(&ipa3_ctx->sps_pm.lock, flags);
}

/**
 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
 *  The idr strcuture per filtering table is intended for rule id generation
@@ -3250,10 +3175,6 @@ static int ipa3_init(const struct ipa3_plat_drv_res *resource_p,
		goto fail_create_sps_wq;
	}

	spin_lock_init(&ipa3_ctx->sps_pm.lock);
	ipa3_ctx->sps_pm.res_granted = false;
	ipa3_ctx->sps_pm.res_rel_in_prog = false;

	/* register IPA with SPS driver */
	bam_props.phys_addr = resource_p->bam_mem_base;
	bam_props.virt_size = resource_p->bam_mem_size;
@@ -3264,13 +3185,11 @@ static int ipa3_init(const struct ipa3_plat_drv_res *resource_p,
	bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
		bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
	bam_props.options |= SPS_BAM_RES_CONFIRM;
	if (ipa3_ctx->ipa_bam_remote_mode == true)
		bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
	if (ipa3_ctx->smmu_present)
		bam_props.options |= SPS_BAM_SMMU_EN;
	bam_props.ee = resource_p->ee;
	bam_props.callback = ipa3_sps_event_cb;
	bam_props.ipc_loglevel = 2;

	result = sps_register_bam_device(&bam_props, &ipa3_ctx->bam_handle);
@@ -4012,15 +3931,6 @@ int ipa3_ap_suspend(struct device *dev)
	int i;

	IPADBG("Enter...\n");
	/*
	 * In case SPS requested IPA resources fail to suspend.
	 * This can happen if SPS driver is during the processing of
	 * IPA BAM interrupt
	 */
	if (ipa3_ctx->sps_pm.res_granted && !ipa3_ctx->sps_pm.res_rel_in_prog) {
		IPAERR("SPS resource is granted, do not suspend\n");
		return -EAGAIN;
	}

	/* In case there is a tx/rx handler in polling mode fail to suspend */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
@@ -4033,6 +3943,7 @@ int ipa3_ap_suspend(struct device *dev)
	}

	/* release SPS IPA resource without waiting for inactivity timer */
	atomic_set(&ipa3_ctx->sps_pm.eot_activity, 0);
	ipa3_sps_release_resource(NULL);
	IPADBG("Exit\n");

Loading