
Commit 10e5e375 authored by Vijay Viswanath, committed by Martin K. Petersen

scsi: ufs: Add clock ungating to a separate workqueue



The UFS driver can receive a request during memory reclaim by kswapd. When
the driver then queues the ungate work and there are no idle workers,
kthreadd is invoked to create a new kworker. Since the kswapd task holds a
mutex which kthreadd also needs, this can deadlock. The ungate work must
therefore run on a separate workqueue created with the WQ_MEM_RECLAIM flag.
Such a workqueue has a rescuer thread which takes over when the above
deadlock condition would otherwise occur.
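As a rough illustration of the pattern this patch applies (placeholder names,
not the actual ufshcd code): a workqueue allocated with WQ_MEM_RECLAIM gets a
dedicated rescuer thread, so work queued on it can still make progress when
kthreadd cannot spawn new kworkers during reclaim.

/*
 * Minimal sketch, assuming a driver-private work item; not the ufshcd
 * implementation itself.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* Work that must make forward progress even under memory reclaim. */
}

static int example_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread for this queue. */
	example_wq = alloc_ordered_workqueue("example_wq", WQ_MEM_RECLAIM);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_work_fn);
	return 0;
}

static void example_submit(void)
{
	/* Queue on the rescuer-backed queue instead of schedule_work(). */
	queue_work(example_wq, &example_work);
}

static void example_exit(void)
{
	cancel_work_sync(&example_work);
	destroy_workqueue(example_wq);
}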

Signed-off-by: Vijay Viswanath <vviswana@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Reviewed-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 7f6ba4f1
drivers/scsi/ufs/ufshcd.c  +10 −1
@@ -1532,7 +1532,8 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		schedule_work(&hba->clk_gating.ungate_work);
+		queue_work(hba->clk_gating.clk_gating_workq,
+			   &hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
 		 * work to be done or not.
@@ -1718,6 +1719,8 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
+	char wq_name[sizeof("ufs_clk_gating_00")];
+
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
@@ -1725,6 +1728,11 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+		 hba->host->host_no);
+	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+							   WQ_MEM_RECLAIM);
+
 	hba->clk_gating.is_enabled = true;
 
 	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
@@ -1752,6 +1760,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+	destroy_workqueue(hba->clk_gating.clk_gating_workq);
 }
 
 /* Must be called with host lock acquired */
drivers/scsi/ufs/ufshcd.h  +1 −0
@@ -362,6 +362,7 @@ struct ufs_clk_gating {
 	struct device_attribute enable_attr;
 	bool is_enabled;
 	int active_reqs;
+	struct workqueue_struct *clk_gating_workq;
 };
 
 struct ufs_saved_pwr_info {