Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 78d343b5 authored by Yan He's avatar Yan He Committed by Andrei Danaila
Browse files

msm: pcie: add the support for link down handling



The PCIe link can go down without advance notification. Add
handling and recovery for this kind of link down.

Change-Id: I80ef9365cec1a876c100bc196a3bf6905e8f0c2b
Signed-off-by: default avatarYan He <yanhe@codeaurora.org>
parent 5ebada7e
Loading
Loading
Loading
Loading
+56 −1
Original line number Diff line number Diff line
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -15,11 +15,45 @@

#include <linux/types.h>

/* Option flags accepted by msm_pcie_pm_control() suspend/resume requests. */
enum msm_pcie_config {
	MSM_PCIE_CONFIG_INVALID = 0,
	/* skip saving/restoring the PCI config space across the transition */
	MSM_PCIE_CONFIG_NO_CFG_RESTORE = 0x1,
	/* the request is part of link-down recovery handling */
	MSM_PCIE_CONFIG_LINKDOWN = 0x2,
};

/* Power-management operations a client may request on a PCIe link. */
enum msm_pcie_pm_opt {
	MSM_PCIE_SUSPEND,
	MSM_PCIE_RESUME
};

/* Events a client driver can register to be notified about (bitmask). */
enum msm_pcie_event {
	MSM_PCIE_EVENT_INVALID = 0,
	MSM_PCIE_EVENT_LINKDOWN = 0x1,
	MSM_PCIE_EVENT_LINKUP = 0x2,
};

/* How a registered event is delivered: callback invocation or completion. */
enum msm_pcie_trigger {
	MSM_PCIE_TRIGGER_CALLBACK,
	MSM_PCIE_TRIGGER_COMPLETION,
};

/* Payload handed to the client when a registered event fires. */
struct msm_pcie_notify {
	enum msm_pcie_event event;	/* which event occurred */
	void *user;			/* client cookie (the client's pci_dev) */
	void *data;			/* event-specific data, may be NULL */
	u32 options;			/* event-specific option flags */
};

/* Registration record a client passes to msm_pcie_register_event(). */
struct msm_pcie_register_event {
	u32 events;			/* bitmask of enum msm_pcie_event */
	void *user;			/* client's pci_dev; used to find the RC */
	enum msm_pcie_trigger mode;	/* callback vs. completion delivery */
	void (*callback)(struct msm_pcie_notify *notify);
	struct msm_pcie_notify notify;	/* storage the bus driver fills in */
	struct completion *completion;	/* used for MSM_PCIE_TRIGGER_COMPLETION */
	u32 options;			/* registration option flags */
};

/**
 * msm_pcie_pm_control - control the power state of a PCIe link.
 * @pm_opt:	power management operation
@@ -36,4 +70,25 @@ enum msm_pcie_pm_opt {
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options);

/**
 * msm_pcie_register_event - register an event with PCIe bus driver.
 * @reg:	event structure
 *
 * This function gives PCIe endpoint device drivers an option to register
 * events with PCIe bus driver.
 *
 * Return: 0 on success, negative value on error
 */
int msm_pcie_register_event(struct msm_pcie_register_event *reg);

/**
 * msm_pcie_deregister_event - deregister an event with PCIe bus driver.
 * @reg:	event structure
 *
 * This function gives PCIe endpoint device drivers an option to deregister
 * events with PCIe bus driver.
 *
 * Return: 0 on success, negative value on error
 */
int msm_pcie_deregister_event(struct msm_pcie_register_event *reg);
#endif
+217 −25
Original line number Diff line number Diff line
@@ -86,14 +86,6 @@
#define RD 0
#define WR 1

/* PM control options */
#define PM_IRQ                   0x1
#define PM_CLK                   0x2
#define PM_GPIO                  0x4
#define PM_VREG                  0x8
#define PM_PIPE_CLK              0x10
#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)

/* Timing Delays */
#define PERST_PROPAGATION_DELAY_US_MIN        10000
#define PERST_PROPAGATION_DELAY_US_MAX        15000
@@ -122,8 +114,6 @@ static int msm_pcie_debug_mask;
module_param_named(debug_mask, msm_pcie_debug_mask,
			    int, S_IRUGO | S_IWUSR | S_IWGRP);

struct mutex setup_lock;

/**
 *  PCIe driver state
 */
@@ -217,6 +207,36 @@ int msm_pcie_get_debug_mask(void)
	return msm_pcie_debug_mask;
}

/*
 * msm_pcie_cfg_recover() - replay shadowed config-space writes after a
 * link-down recovery.
 * @dev: PCIe root complex device data
 * @rc:  true to restore the root complex's config space (rc_shadow into
 *       dm_core), false to restore the endpoint's (ep_shadow into conf)
 *
 * Every slot in the shadow array that was ever written holds the last value
 * written to that config dword; untouched slots hold PCIE_CLEAR and are
 * skipped.  Restoration walks from the highest dword down to 0, so dword 0
 * (vendor/device ID area) is touched last.
 */
void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i;
	u32 val = 0;
	u32 *shadow;
	void *cfg;

	/* Select which shadow array and which config-space window to use. */
	if (rc) {
		shadow = dev->rc_shadow;
		cfg = dev->dm_core;
	} else {
		shadow = dev->ep_shadow;
		cfg = dev->conf;
	}

	for (i = PCIE_CONF_SPACE_DW - 1; i >= 0; i--) {
		/*
		 * NOTE(review): shadow is an ordinary DRAM array, yet it is
		 * read with readl_relaxed() (MMIO accessor) — works on this
		 * platform but worth confirming it is intentional.
		 */
		val = readl_relaxed(shadow + i);
		if (val != PCIE_CLEAR) {
			PCIE_DBG("PCIe: before recovery:cfg 0x%x:0x%x\n",
				i * 4, readl_relaxed(cfg + i * 4));
			PCIE_DBG("PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
				i, i * 4, val);
			writel_relaxed(val, cfg + i * 4);
			/* order each restore before the readback/next write */
			wmb();
			PCIE_DBG("PCIe: after recovery:cfg 0x%x:0x%x\n\n",
				i * 4, readl_relaxed(cfg + i * 4));
		}
	}
}

static void msm_pcie_write_mask(void __iomem *addr,
				uint32_t clear_mask, uint32_t set_mask)
{
@@ -325,6 +345,13 @@ static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		if (dev->shadow_en) {
			if (rc)
				dev->rc_shadow[word_offset / 4] = wr_val;
			else
				dev->ep_shadow[word_offset / 4] = wr_val;
		}

		PCIE_DBG(
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
@@ -663,6 +690,23 @@ static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)

	dev_conf = BDF_OFFSET(1, 0, 0);

	if (dev->shadow_en) {
		writel_relaxed(0, dev->rc_shadow +
				PCIE20_PLR_IATU_VIEWPORT / 4);
		writel_relaxed(4, dev->rc_shadow +
				PCIE20_PLR_IATU_CTRL1 / 4);
		writel_relaxed(lower, dev->rc_shadow +
				PCIE20_PLR_IATU_LBAR / 4);
		writel_relaxed(upper, dev->rc_shadow +
				PCIE20_PLR_IATU_UBAR / 4);
		writel_relaxed(limit, dev->rc_shadow + PCIE20_PLR_IATU_LAR / 4);
		writel_relaxed(dev_conf, dev->rc_shadow +
				PCIE20_PLR_IATU_LTAR / 4);
		writel_relaxed(0, dev->rc_shadow + PCIE20_PLR_IATU_UTAR / 4);
		writel_relaxed(BIT(31), dev->rc_shadow +
				PCIE20_PLR_IATU_CTRL2 / 4);
	}

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
@@ -708,6 +752,17 @@ static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					PCIE20_ACK_N_FTS,
					dev->n_fts << 8);

	if (dev->shadow_en) {
		if (!dev->n_fts)
			msm_pcie_write_mask(dev->rc_shadow +
				PCIE20_ACK_F_ASPM_CTRL_REG / 4, 0, BIT(15));
		else
			msm_pcie_write_mask(dev->rc_shadow +
				PCIE20_ACK_F_ASPM_CTRL_REG / 4,
				PCIE20_ACK_N_FTS, dev->n_fts << 8);
	}

	PCIE_DBG("Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
}
@@ -731,6 +786,14 @@ static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev)
					BIT(3)|BIT(2)|BIT(1)|BIT(0));
	msm_pcie_write_mask(dev->dm_core + PCIE20_DEVICE_CONTROL2_STATUS2, 0,
					BIT(10));
	if (dev->shadow_en) {
		msm_pcie_write_mask(dev->rc_shadow +
			PCIE20_CAP_LINKCTRLSTATUS / 4, 0, BIT(1)|BIT(0));
		msm_pcie_write_mask(dev->rc_shadow + PCIE20_L1SUB_CONTROL1 / 4,
			0, BIT(3)|BIT(2)|BIT(1)|BIT(0));
		msm_pcie_write_mask(dev->rc_shadow +
			PCIE20_DEVICE_CONTROL2_STATUS2 / 4, 0, BIT(10));
	}
	PCIE_DBG("RC's CAP_LINKCTRLSTATUS:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS));
	PCIE_DBG("RC's L1SUB_CONTROL1:0x%x\n",
@@ -746,6 +809,15 @@ static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev)
					BIT(3)|BIT(2)|BIT(1)|BIT(0));
	msm_pcie_write_mask(dev->conf + PCIE20_DEVICE_CONTROL2_STATUS2, 0,
					BIT(10));
	if (dev->shadow_en) {
		msm_pcie_write_mask(dev->ep_shadow +
			PCIE20_CAP_LINKCTRLSTATUS / 4, 0, BIT(1)|BIT(0));
		msm_pcie_write_mask(dev->ep_shadow + PCIE20_L1SUB_CONTROL1 / 4 +
					PCIE20_EP_L1SUB_CTL1_OFFSET / 4, 0,
					BIT(3)|BIT(2)|BIT(1)|BIT(0));
		msm_pcie_write_mask(dev->ep_shadow +
				PCIE20_DEVICE_CONTROL2_STATUS2 / 4, 0, BIT(10));
	}
	PCIE_DBG("EP's CAP_LINKCTRLSTATUS:0x%x\n",
		readl_relaxed(dev->conf + PCIE20_CAP_LINKCTRLSTATUS));
	PCIE_DBG("EP's L1SUB_CONTROL1:0x%x\n",
@@ -1009,16 +1081,21 @@ static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
	dev->dev_io_res = NULL;
}

static int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret;
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG("RC%d\n", dev->rc_idx);

	mutex_lock(&setup_lock);
	mutex_lock(&dev->setup_lock);

	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		pr_err("PCIe:%s:the link is already enabled\n", __func__);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

@@ -1159,7 +1236,7 @@ clk_fail:
	msm_pcie_vreg_deinit(dev);
	msm_pcie_pipe_clk_deinit(dev);
out:
	mutex_unlock(&setup_lock);
	mutex_unlock(&dev->setup_lock);

	return ret;
}
@@ -1169,11 +1246,22 @@ void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG("RC%d\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if ((dev->link_status == MSM_PCIE_LINK_DISABLED)
		&& !(options & PM_EXPT)) {
		pr_err("PCIe:%s: the link is already enabled\n", __func__);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	pr_info("PCIe: Assert the reset of endpoint of RC%d.\n", dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	if (options & PM_CLK) {
		if (!(options & PM_EXPT))
			msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
@@ -1186,6 +1274,8 @@ void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
		msm_pcie_pipe_clk_deinit(dev);

	dev->link_status = MSM_PCIE_LINK_DISABLED;

	mutex_unlock(&dev->setup_lock);
}

static int msm_pcie_setup(int nr, struct pci_sys_data *sys)
@@ -1329,6 +1419,7 @@ static int msm_pcie_probe(struct platform_device *pdev)
{
	int ret = 0;
	int rc_idx = -1;
	int i;

	PCIE_DBG("\n");

@@ -1439,6 +1530,10 @@ static int msm_pcie_probe(struct platform_device *pdev)
	msm_pcie_dev[rc_idx].user_suspend = false;
	msm_pcie_dev[rc_idx].saved_state = NULL;
	msm_pcie_dev[rc_idx].enumerated = false;
	msm_pcie_dev[rc_idx].handling_linkdown = 0;
	msm_pcie_dev[rc_idx].recovery_pending = false;
	msm_pcie_dev[rc_idx].linkdown_counter = 0;
	msm_pcie_dev[rc_idx].wake_counter = 0;
	memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
				sizeof(msm_pcie_vreg_info));
	memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
@@ -1451,6 +1546,11 @@ static int msm_pcie_probe(struct platform_device *pdev)
				sizeof(msm_pcie_res_info));
	memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
				sizeof(msm_pcie_irq_info));
	msm_pcie_dev[rc_idx].shadow_en = true;
	for (i = 0; i < PCIE_CONF_SPACE_DW; i++) {
		msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
		msm_pcie_dev[rc_idx].ep_shadow[i] = PCIE_CLEAR;
	}

	ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
				msm_pcie_dev[rc_idx].pdev);
@@ -1549,11 +1649,13 @@ static int __init pcie_init(void)
	pcie_drv.rc_num = 0;
	pcie_drv.rc_expected = 0;
	mutex_init(&pcie_drv.drv_lock);
	mutex_init(&setup_lock);

	for (i = 0; i < MAX_RC_NUM; i++) {
		spin_lock_init(&msm_pcie_dev[i].cfg_lock);
		msm_pcie_dev[i].cfg_access = true;
		mutex_init(&msm_pcie_dev[i].setup_lock);
		mutex_init(&msm_pcie_dev[i].recovery_lock);
		mutex_init(&msm_pcie_dev[i].linkdown_lock);
	}

	ret = platform_driver_register(&msm_pcie_driver);
@@ -1593,8 +1695,7 @@ static int msm_pcie_pm_suspend(struct pci_dev *dev,

	PCIE_DBG("RC%d\n", pcie_dev->rc_idx);

	if (dev) {
		PCIE_DBG("Save config space of RC%d.\n", pcie_dev->rc_idx);
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state =	pci_store_saved_state(dev);
	}
@@ -1625,6 +1726,10 @@ static int msm_pcie_pm_suspend(struct pci_dev *dev,
		PCIE_DBG("RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	if (options & MSM_PCIE_CONFIG_LINKDOWN)
		msm_pcie_disable(pcie_dev, PM_EXPT | PM_PIPE_CLK |
						PM_CLK | PM_VREG);
	else
		msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	return ret;
@@ -1640,10 +1745,14 @@ static void msm_pcie_fixup_suspend(struct pci_dev *dev)
	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
		return;

	mutex_lock(&pcie_dev->recovery_lock);

	ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
	if (ret)
		pr_err("PCIe: RC%d got failure in suspend:%d.\n",
			pcie_dev->rc_idx, ret);

	mutex_unlock(&pcie_dev->recovery_lock);
}
DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
			  msm_pcie_fixup_suspend);
@@ -1672,10 +1781,12 @@ static int msm_pcie_pm_resume(struct pci_dev *dev,
		PCIE_DBG("dev->bus->number = %d dev->bus->primary = %d\n",
			 dev->bus->number, dev->bus->primary);

		pci_load_and_free_saved_state(dev, &pcie_dev->saved_state);

		if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
			pci_load_and_free_saved_state(dev,
					&pcie_dev->saved_state);
			pci_restore_state(dev);
		}
	}

	return ret;
}
@@ -1691,6 +1802,12 @@ void msm_pcie_fixup_resume(struct pci_dev *dev)
		pcie_dev->user_suspend)
		return;

	if (pcie_dev->recovery_pending) {
		PCIE_DBG("RC%d is pending recovery; so ignore resume.\n",
			pcie_dev->rc_idx);
		return;
	}

	ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
	if (ret)
		pr_err("PCIe: RC%d got failure in fixup resume:%d.\n",
@@ -1710,6 +1827,12 @@ void msm_pcie_fixup_resume_early(struct pci_dev *dev)
		pcie_dev->user_suspend)
		return;

	if (pcie_dev->recovery_pending) {
		PCIE_DBG("RC%d is pending recovery; so ignore resume.\n",
			pcie_dev->rc_idx);
		return;
	}

	ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
	if (ret)
		pr_err("PCIe: RC%d got failure in resume:%d.\n",
@@ -1763,13 +1886,14 @@ int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG("User of RC%d requests to suspend the link\n",	rc_idx);
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED) {
		if ((msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			&& !(options & MSM_PCIE_CONFIG_LINKDOWN)) {
			pr_err(
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}
		if (!(options & MSM_PCIE_CONFIG_LINKDOWN))
			msm_pcie_dev[rc_idx].user_suspend = true;
		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
@@ -1806,3 +1930,71 @@ out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);

/*
 * msm_pcie_register_event() - register a client for PCIe bus events.
 * @reg: filled-in registration record; @reg->user must be the client's
 *       struct pci_dev so the owning root complex can be located.
 *
 * Stores @reg in the root complex's device data; only one registration per
 * RC is kept (a second call overwrites the first).
 *
 * Return: 0 on success, -ENODEV if @reg/@reg->user is NULL or no RC is
 * found for the given endpoint.
 */
int msm_pcie_register_event(struct msm_pcie_register_event *reg)
{
	int ret = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_DBG("\n");

	if (!reg) {
		pr_err("PCIe: Event registration is NULL\n");
		return -ENODEV;
	}

	if (!reg->user) {
		pr_err("PCIe: User of event registration is NULL\n");
		return -ENODEV;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user));

	if (pcie_dev) {
		pcie_dev->event_reg = reg;
		PCIE_DBG("Event 0x%x is registered for RC %d\n", reg->events,
				pcie_dev->rc_idx);
	} else {
		/*
		 * Fix: print the pointer with %p instead of casting it to
		 * u32, which truncates on 64-bit and triggers a
		 * pointer-to-int-cast warning.
		 */
		pr_err(
			"PCIe: did not find RC for pci endpoint device %p.\n",
			reg->user);
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL(msm_pcie_register_event);
EXPORT_SYMBOL(msm_pcie_register_event);

/*
 * msm_pcie_deregister_event() - remove a client's event registration.
 * @reg: the record previously passed to msm_pcie_register_event();
 *       @reg->user must be the client's struct pci_dev.
 *
 * Clears the RC's event_reg pointer so no further callbacks are delivered.
 *
 * Return: 0 on success, -ENODEV if @reg/@reg->user is NULL or no RC is
 * found for the given endpoint.
 */
int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
{
	int ret = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_DBG("\n");

	if (!reg) {
		pr_err("PCIe: Event deregistration is NULL\n");
		return -ENODEV;
	}

	if (!reg->user) {
		pr_err("PCIe: User of event deregistration is NULL\n");
		return -ENODEV;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user));

	if (pcie_dev) {
		pcie_dev->event_reg = NULL;
		PCIE_DBG("Event is deregistered for RC %d\n",
				pcie_dev->rc_idx);
	} else {
		/*
		 * Fix: print the pointer with %p instead of casting it to
		 * u32, which truncates on 64-bit and triggers a
		 * pointer-to-int-cast warning.
		 */
		pr_err(
			"PCIe: did not find RC for pci endpoint device %p.\n",
			reg->user);
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL(msm_pcie_deregister_event);
EXPORT_SYMBOL(msm_pcie_deregister_event);
+45 −0
Original line number Diff line number Diff line
@@ -44,6 +44,19 @@
#define PCIE_BUS_PRIV_DATA(pdev) \
	(((struct pci_sys_data *)pdev->bus->sysdata)->private_data)

/* PM control options */
#define PM_IRQ                   0x1
#define PM_CLK                   0x2
#define PM_GPIO                  0x4
#define PM_VREG                  0x8
#define PM_PIPE_CLK              0x10
#define PM_EXPT                  0x80000000
#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)

#define PCIE_CONF_SPACE_DW		      1024
#define PCIE_CLEAR			      0xDEADBEEF
#define PCIE_LINK_DOWN                        0xFFFFFFFF

enum msm_pcie_res {
	MSM_PCIE_RES_PARF,
	MSM_PCIE_RES_PHY,
@@ -160,6 +173,7 @@ struct msm_pcie_dev_t {
	bool                         cfg_access;
	spinlock_t                   cfg_lock;
	unsigned long                irqsave_flags;
	struct mutex                 setup_lock;

	struct irq_domain            *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
@@ -183,9 +197,23 @@ struct msm_pcie_dev_t {
	uint32_t                     rc_idx;
	bool                         enumerated;
	struct work_struct	     handle_wake_work;
	struct work_struct	     handle_linkdown_work;
	int                          handling_linkdown;
	bool                         recovery_pending;
	struct mutex                 recovery_lock;
	struct mutex                 linkdown_lock;
	ulong                        linkdown_counter;
	ulong                        wake_counter;
	u32			     ep_shadow[PCIE_CONF_SPACE_DW];
	u32                          rc_shadow[PCIE_CONF_SPACE_DW];
	bool                         shadow_en;
	struct msm_pcie_register_event *event_reg;
};

extern int msm_pcie_enumerate(u32 rc_idx);
extern int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options);
extern void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options);
extern void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc);
extern void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev);
extern int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev);
extern void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev);
@@ -194,4 +222,21 @@ extern int msm_pcie_get_debug_mask(void);
extern void pcie_phy_init(struct msm_pcie_dev_t *dev);
extern bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev);

/*
 * msm_pcie_confirm_linkup() - verify the link is actually up, not just
 * believed up by software state.
 * @dev: PCIe root complex device data
 *
 * Cross-checks the driver's link_status against live hardware reads; a
 * dead link returns all-ones (PCIE_LINK_DOWN) on config reads.
 *
 * Return: true only if software state and hardware reads all agree the
 * link is up.
 */
static inline bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev)
{
	if (dev->link_status != MSM_PCIE_LINK_ENABLED)
		return false;

	/*
	 * NOTE(review): offset 0x80/BIT(29) presumably reads a link-status
	 * bit in the RC's config space — confirm against the controller's
	 * capability layout.
	 */
	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29)))
		return false;

	/* all-ones from RC config space means the bus is unreachable */
	if (readl_relaxed(dev->dm_core) == PCIE_LINK_DOWN)
		return false;

	/* same check against the endpoint's config space */
	if (readl_relaxed(dev->conf) == PCIE_LINK_DOWN)
		return false;

	return true;
}

#endif
+247 −7
Original line number Diff line number Diff line
@@ -24,6 +24,8 @@
#include <linux/pci.h>
#include <mach/irqs.h>
#include <linux/irqdomain.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include "pcie.h"

/* Any address will do here, as it won't be dereferenced */
@@ -37,13 +39,51 @@

#define PCIE20_MSI_CTRL_MAX 8

#define LINKDOWN_INIT_WAITING_US_MIN    995
#define LINKDOWN_INIT_WAITING_US_MAX    1005
#define LINKDOWN_WAITING_US_MIN         4900
#define LINKDOWN_WAITING_US_MAX         5100
#define LINKDOWN_WAITING_COUNT          200

/*
 * msm_pcie_recover_link() - bring the link back up after a link-down and
 * restore both sides' config space from the shadow copies.
 * @dev: PCIe root complex device data
 *
 * On success, re-enables shadow tracking and, if the client registered for
 * MSM_PCIE_EVENT_LINKUP, invokes its callback.
 *
 * Return: 0 on success, the msm_pcie_enable() error code otherwise.
 */
static int msm_pcie_recover_link(struct msm_pcie_dev_t *dev)
{
	int ret;

	ret = msm_pcie_enable(dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	if (!ret) {
		PCIE_DBG("Recover config space of RC%d and its EP\n",
				dev->rc_idx);
		/* replay shadowed writes: RC first, then the endpoint */
		PCIE_DBG("Recover RC%d\n", dev->rc_idx);
		msm_pcie_cfg_recover(dev, true);
		PCIE_DBG("Recover EP of RC%d\n", dev->rc_idx);
		msm_pcie_cfg_recover(dev, false);
		/* resume tracking config writes now that the link is back */
		dev->shadow_en = true;

		/* notify the client, if one asked for link-up events */
		if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
			dev->event_reg && dev->event_reg->callback &&
			(dev->event_reg->events & MSM_PCIE_EVENT_LINKUP)) {
			struct msm_pcie_notify *notify =
					&dev->event_reg->notify;
			notify->event = MSM_PCIE_EVENT_LINKUP;
			notify->user = dev->event_reg->user;
			PCIE_DBG("Linkup callback for RC%d\n", dev->rc_idx);
			dev->event_reg->callback(notify);
		}
	}

	return ret;
}

static void handle_wake_func(struct work_struct *work)
{
	int ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG("Wake work for RC %d\n", dev->rc_idx);
	PCIE_DBG("PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		ret = msm_pcie_enumerate(dev->rc_idx);
@@ -51,26 +91,213 @@ static void handle_wake_func(struct work_struct *work)
			pr_err(
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			return;
			goto out;
		}

		if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
			dev->event_reg && dev->event_reg->callback &&
			(dev->event_reg->events & MSM_PCIE_EVENT_LINKUP)) {
			struct msm_pcie_notify *notify =
					&dev->event_reg->notify;
			notify->event = MSM_PCIE_EVENT_LINKUP;
			notify->user = dev->event_reg->user;
			PCIE_DBG(
				"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
				dev->rc_idx);
			dev->event_reg->callback(notify);
		}
	} else {
		pr_err("PCIe: %s: RC %d has already been enumerated.\n",
			__func__, dev->rc_idx);
		int waiting_cycle = 0;
		usleep_range(LINKDOWN_INIT_WAITING_US_MIN,
				LINKDOWN_INIT_WAITING_US_MAX);
		while ((dev->handling_linkdown > 0) &&
			(waiting_cycle++ < LINKDOWN_WAITING_COUNT)) {
			usleep_range(LINKDOWN_WAITING_US_MIN,
				LINKDOWN_WAITING_US_MAX);
		}

		if (waiting_cycle == LINKDOWN_WAITING_COUNT)
			pr_err(
				"PCIe: Linkdown handling for RC%d is not finished after max waiting time.\n",
				dev->rc_idx);

		if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
			PCIE_DBG(
				"PCIe: The link status of RC%d is up. Check if it is really up.\n",
					dev->rc_idx);

			if (msm_pcie_confirm_linkup(dev)) {
				PCIE_DBG(
					"PCIe: The link status of RC%d is really up; so ignore wake IRQ.\n",
					dev->rc_idx);
				goto out;
			} else {
				dev->link_status = MSM_PCIE_LINK_DISABLED;
				pr_err(
					"PCIe: The link of RC%d is actually down; start recovering link.\n",
					dev->rc_idx);
				msm_pcie_disable(dev, PM_EXPT | PM_PIPE_CLK |
							PM_CLK | PM_VREG);
				ret = msm_pcie_recover_link(dev);
				if (ret) {
					pr_err(
						"PCIe:failed to recover link for RC%d after receive wake IRQ.\n",
						dev->rc_idx);
					goto out;
				}
			}
		} else {
			PCIE_DBG("PCIe: The link status of RC%d is down.\n",
					dev->rc_idx);

			if (dev->recovery_pending) {
				static u32 retries = 1;
				PCIE_DBG(
					"PCIe: Start recovering link for RC%d after receive wake IRQ.\n",
					dev->rc_idx);
				ret = msm_pcie_recover_link(dev);
				if (ret) {
					pr_err(
						"PCIe:failed to enable link for RC%d in No. %d try after receive wake IRQ.\n",
						dev->rc_idx, retries++);
					goto out;
				} else {
					dev->recovery_pending = false;
					PCIE_DBG(
						"PCIe: Successful recovery for RC%d in No. %d try.\n",
						dev->rc_idx, retries);
					retries = 1;
				}
			} else {
				PCIE_DBG(
					"PCIe: No pending recovery for RC%d; so ignore wake IRQ.\n",
					dev->rc_idx);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&dev->recovery_lock);
}

/*
 * handle_wake_irq() - interrupt handler for the endpoint's WAKE# signal.
 * @irq:  the wake interrupt number
 * @data: the owning struct msm_pcie_dev_t
 *
 * Counts the wake and defers all real work (first enumeration, or link
 * verification/recovery) to handle_wake_work on the system workqueue.
 *
 * Return: IRQ_HANDLED always.
 */
static irqreturn_t handle_wake_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	dev->wake_counter++;
	PCIE_DBG("PCIe: No. %ld wake IRQ for RC%d\n",
			dev->wake_counter, dev->rc_idx);

	PCIE_DBG("PCIe WAKE is asserted by Endpoint of RC%d\n", dev->rc_idx);

	if (!dev->enumerated) {
		/* first wake: enumerate the bus from the work item */
		/* fix: typo "enumeating" -> "enumerating" in debug output */
		PCIE_DBG("Start enumerating RC%d\n", dev->rc_idx);
		schedule_work(&dev->handle_wake_work);
	} else {
		PCIE_DBG("Wake up RC%d\n", dev->rc_idx);
		/*
		 * NOTE(review): stay_awake immediately followed by relax
		 * only pokes the wakeup source; confirm a timed/held wake
		 * is not needed to keep the system up until the work runs.
		 */
		__pm_stay_awake(&dev->ws);
		__pm_relax(&dev->ws);

		schedule_work(&dev->handle_wake_work);
	}

	return IRQ_HANDLED;
}

/*
 * handle_linkdown_func() - workqueue handler that recovers from a link-down
 * event signalled by handle_linkdown_irq().
 * @work: embedded in struct msm_pcie_dev_t (handle_linkdown_work)
 *
 * If a client registered for MSM_PCIE_EVENT_LINKDOWN, the client's callback
 * decides the outcome: if it re-enabled the link, only the config space is
 * replayed; otherwise the link is fully powered down and recovery is
 * deferred (recovery_pending) until the next wake IRQ.  With no registered
 * client the RC recovers the link itself.
 *
 * Runs under linkdown_lock and decrements handling_linkdown when done,
 * which handle_wake_func polls to know linkdown handling has finished.
 */
static void handle_linkdown_func(struct work_struct *work)
{
	int ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_linkdown_work);

	PCIE_DBG("PCIe: Linkdown work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->linkdown_lock);

	if (dev->event_reg && dev->event_reg->callback &&
		(dev->event_reg->events & MSM_PCIE_EVENT_LINKDOWN)) {
		struct msm_pcie_notify *notify = &dev->event_reg->notify;
		notify->event = MSM_PCIE_EVENT_LINKDOWN;
		notify->user = dev->event_reg->user;
		PCIE_DBG("PCIe: Linkdown callback for RC%d\n", dev->rc_idx);
		/* the callback may re-enable the link; check status after */
		dev->event_reg->callback(notify);

		if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
			PCIE_DBG(
				"PCIe: Client of RC%d does not enable link in callback; so disable the link\n",
				dev->rc_idx);
			/* defer recovery until the next wake IRQ */
			dev->recovery_pending = true;
			msm_pcie_disable(dev,
				PM_EXPT | PM_PIPE_CLK | PM_CLK | PM_VREG);
		} else {
			PCIE_DBG(
				"PCIe: Client of RC%d has enabled link in callback; so recover config space\n",
				dev->rc_idx);
			PCIE_DBG("PCIe: Recover RC%d\n", dev->rc_idx);
			msm_pcie_cfg_recover(dev, true);
			PCIE_DBG("PCIe: Recover EP of RC%d\n", dev->rc_idx);
			msm_pcie_cfg_recover(dev, false);
			dev->shadow_en = true;

			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG("PCIe: Linkup callback for RC%d\n",
						dev->rc_idx);
				dev->event_reg->callback(notify);
			}
		}
	} else {
		PCIE_DBG(
			"PCIe: No registration for linkdown of RC%d; so recover the link by RC\n",
			dev->rc_idx);

		/* no client: power-cycle and recover the link ourselves */
		msm_pcie_disable(dev, PM_EXPT | PM_PIPE_CLK | PM_CLK | PM_VREG);
		ret = msm_pcie_recover_link(dev);

		/*
		 * NOTE(review): on failure recovery_pending is not set here,
		 * so the wake-IRQ path will not retry — confirm intended.
		 */
		if (ret) {
			pr_err(
				"PCIe:failed to enable RC%d again upon linkdown.\n",
				dev->rc_idx);
			goto out;
		}
	}

out:
	/*
	 * NOTE(review): handling_linkdown is incremented in hard-IRQ context
	 * without this lock; an atomic_t would be safer — confirm.
	 */
	dev->handling_linkdown--;
	if (dev->handling_linkdown < 0)
		pr_err("PCIe:handling_linkdown for RC%d is %d\n",
			dev->rc_idx, dev->handling_linkdown);
	mutex_unlock(&dev->linkdown_lock);
}

/*
 * handle_linkdown_irq() - interrupt handler for the controller's link-down
 * interrupt.
 * @irq:  the link-down interrupt number
 * @data: the owning struct msm_pcie_dev_t
 *
 * Marks the link down, stops shadow tracking, asserts PERST to hold the
 * endpoint in reset, and schedules handle_linkdown_work for the actual
 * recovery.  handling_linkdown mirrors the number of scheduled-but-
 * unfinished work items so handle_wake_func can wait for them.
 *
 * Return: IRQ_HANDLED always.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	dev->linkdown_counter++;
	dev->handling_linkdown++;
	PCIE_DBG("PCIe: No. %ld linkdown IRQ for RC%d: handling_linkdown:%d\n",
		dev->linkdown_counter, dev->rc_idx, dev->handling_linkdown);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
		/*
		 * Fix: no work is scheduled on this path, so undo the
		 * increment; otherwise handling_linkdown never returns to
		 * zero and handle_wake_func waits its full timeout on every
		 * subsequent wake IRQ.
		 */
		dev->handling_linkdown--;
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* stop shadowing writes made while the link is dead */
		dev->shadow_en = false;
		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		pr_err("PCIe link is down for RC%d\n", dev->rc_idx);
		schedule_work(&dev->handle_linkdown_work);
	}

	return IRQ_HANDLED;
}
@@ -396,6 +623,19 @@ int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)

	wakeup_source_init(&dev->ws, "pcie_wakeup_source");

	/* register handler for linkdown interrupt */
	rc = devm_request_irq(pdev,
		dev->irq[MSM_PCIE_INT_LINK_DOWN].num, handle_linkdown_irq,
		IRQF_TRIGGER_RISING, dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
		dev);
	if (rc) {
		pr_err("PCIe: Unable to request linkdown interrupt:%d\n",
			dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
		return rc;
	}

	INIT_WORK(&dev->handle_linkdown_work, handle_linkdown_func);

	/* register handler for physical MSI interrupt line */
	rc = devm_request_irq(pdev,
		dev->irq[MSM_PCIE_INT_MSI].num, handle_msi_irq,