Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e65b5133 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "pci: msm: Add dynamic iATU config for ep config space access"

parents 887e6897 aae80a9b
Loading
Loading
Loading
Loading
+79 −122
Original line number Diff line number Diff line
@@ -636,9 +636,6 @@ struct msm_pcie_device_info {
	short short_bdf;
	u32 sid;
	int domain;
	void __iomem *conf_base;
	unsigned long phy_address;
	u32 dev_ctrlstts_offset;
	struct msm_pcie_register_event *event_reg;
	bool registered;
};
@@ -3031,9 +3028,8 @@ static int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx, *filtered_bdf;
	int rv = 0;
	int i, rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

@@ -3099,28 +3095,14 @@ static int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
		}
	}

	if (!rc && !dev->enumerated)
	if (!rc)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	if (rc || !dev->enumerated) {
	config_base = rc ? dev->dm_core : dev->conf;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

@@ -3665,25 +3647,6 @@ static int pcie_phy_init(struct msm_pcie_dev_t *dev)
	return 0;
}

/*
 * msm_pcie_iatu_config_all_ep() - reprogram one iATU (inbound/outbound
 * Address Translation Unit) region per cached endpoint entry in
 * dev->pcidev_table.
 *
 * Each table slot i maps a 4K window starting at phy_address to config
 * transactions targeting that entry's BDF.  The table is filled
 * front-to-back, so the first slot with a zero bdf terminates the scan.
 *
 * NOTE(review): bdf packs the bus number in bits [31:24] (see
 * BDF_OFFSET usage elsewhere in this driver), so bus 1 — presumably the
 * device directly below the root complex — gets Type 0 (CFG0) config
 * requests while deeper buses get Type 1 (CFG1); confirm against the
 * PCIe spec's config request routing rules.
 */
static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
{
	int i;
	u8 type;
	struct msm_pcie_device_info *dev_table = dev->pcidev_table;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* first empty slot marks the end of the populated table */
		if (!dev_table[i].bdf)
			break;

		/* bus 1 -> CFG0 (immediate child); otherwise CFG1 */
		type = dev_table[i].bdf >> 24 == 0x1 ?
			PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

		/* map a 4K config window [phy, phy + 4K - 1] to this BDF */
		msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
			dev_table[i].phy_address + SZ_4K - 1,
			dev_table[i].bdf);
	}
}

static void msm_pcie_config_core_preset(struct msm_pcie_dev_t *pcie_dev)
{
	u32 supported_link_speed =
@@ -3746,14 +3709,11 @@ static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 * axi config address range to device config address range. Enable
	 * translation for bus 1 dev 0 fn 0.
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
	dev->current_bdf = 0; /* to force IATU re-config */
	msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
@@ -4830,14 +4790,10 @@ static void msm_pcie_disable(struct msm_pcie_dev_t *dev)
static int msm_pcie_config_device_table(struct pci_dev *pcidev, void *pdev)
{
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
@@ -4847,8 +4803,6 @@ static int msm_pcie_config_device_table(struct pci_dev *pcidev, void *pdev)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		struct msm_pcie_device_info *dev_table_t =
@@ -4859,27 +4813,15 @@ static int msm_pcie_config_device_table(struct pci_dev *pcidev, void *pdev)

		dev_table_t->dev = pcidev;
		dev_table_t->domain = rc_idx;
		dev_table_t->conf_base = pcie_dev->conf + i * SZ_4K;
		dev_table_t->phy_address = axi_conf->start + i * SZ_4K;

		msm_pcie_iatu_config(pcie_dev, i, type,
				     dev_table_t->phy_address,
				     dev_table_t->phy_address + SZ_4K - 1, bdf);

		h_type = readl_relaxed(dev_table_t->conf_base +
				       PCIE20_HEADER_TYPE);

		bme = readl_relaxed(dev_table_t->conf_base +
				    PCIE20_COMMAND_STATUS);

		if (h_type & (1 << 16)) {
			pci_write_config_dword(pcidev, PCIE20_COMMAND_STATUS,
					       bme | 0x06);
		} else {
		if (pci_pcie_type(pcidev) == PCI_EXP_TYPE_ENDPOINT) {
			pcie_dev->num_ep++;
			dev_table_t->registered = false;
		}

		if (pci_pcie_type(pcidev) == PCI_EXP_TYPE_UPSTREAM)
			pci_set_master(pcidev);

		if (pcie_dev->num_ep > 1)
			pcie_dev->pending_ep_reg = true;

@@ -5218,16 +5160,30 @@ static void handle_wake_func(struct work_struct *work)
	mutex_unlock(&dev->recovery_lock);
}

/*
 * find_pci_dev_from_bdf() - resolve a 16-bit bus/devfn value to a
 * struct pci_dev within the given PCI domain (segment).
 *
 * @bdf:    bus number in bits [15:8], devfn (device/function) in
 *          bits [7:0] — the layout reported by the AER error source
 *          ID register.
 * @domain: PCI domain number to search.
 *
 * Returns the matching pci_dev, or NULL if the bus or slot does not
 * exist.  NOTE(review): pci_get_slot() returns a referenced device;
 * the caller appears responsible for dropping it with pci_dev_put() —
 * verify the call sites do so.
 */
static struct pci_dev *find_pci_dev_from_bdf(u16 bdf, int domain)
{
	u8 busnr, devfn;
	struct pci_bus *bus;

	busnr = bdf >> 8;
	devfn = bdf & 0xff;

	bus = pci_find_bus(domain, busnr);
	if (!bus)
		return NULL;

	return pci_get_slot(bus, devfn);
}

static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	struct pci_dev *pcidev;
	u16 aer_cap, ep_src_bdf = 0, ep_dev_stts = 0;
	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	u32 ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0;
	int i;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
@@ -5269,43 +5225,42 @@ static irqreturn_t handle_aer_irq(int irq, void *data)

	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff) >> 16;
		else
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff;

		if (!ep_src_bdf)
			continue;

		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
		pcidev = find_pci_dev_from_bdf(ep_src_bdf,
					       pci_domain_nr(dev->dev->bus));
		if (!pcidev) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		PCIE_DBG2(dev,
			  "PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
			  i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset =
				dev->pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}
			  ep_src_bdf >> 24, ep_src_bdf >> 19 & 0x1f,
			  ep_src_bdf >> 16 & 0x07);

		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
		aer_cap = pci_find_ext_capability(pcidev, PCI_EXT_CAP_ID_ERR);
		if (!aer_cap) {
			PCIE_ERR(dev, "PCIe: BDF 0x%04x does not support AER\n",
				 PCI_DEVID(pcidev->bus->number, pcidev->devfn));
			goto out;
		}

		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);
		pci_read_config_dword(pcidev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &ep_uncorr_val);
		pci_read_config_dword(pcidev, aer_cap + PCI_ERR_COR_STATUS,
				      &ep_corr_val);
		pcie_capability_read_word(pcidev, PCI_EXP_DEVSTA, &ep_dev_stts);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
@@ -5316,22 +5271,25 @@ static irqreturn_t handle_aer_irq(int irq, void *data)
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
		if (ep_dev_stts & PCI_EXP_DEVSTA_FED)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
		if (ep_dev_stts & PCI_EXP_DEVSTA_NFED)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
		if (ep_dev_stts & PCI_EXP_DEVSTA_CED)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));
		pcie_capability_clear_and_set_word(pcidev, PCI_EXP_DEVSTA, 0,
						   PCI_EXP_DEVSTA_CED |
						   PCI_EXP_DEVSTA_NFED |
						   PCI_EXP_DEVSTA_FED);

		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
		pci_write_config_dword(pcidev, aer_cap + PCI_ERR_COR_STATUS,
				       ep_corr_val);

#ifdef CONFIG_PCI_QTI
		/* Clear status bits for ERR_NONFATAL errors only */
		pci_cleanup_aer_uncorrect_error_status(pcidev);
#endif
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
@@ -5471,6 +5429,7 @@ static irqreturn_t handle_global_irq(int irq, void *data)
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&dev->irq_lock, irqsave_flags);

@@ -5478,8 +5437,7 @@ static irqreturn_t handle_global_irq(int irq, void *data)
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
		return IRQ_HANDLED;
		goto done;
	}

	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
@@ -5507,13 +5465,13 @@ static irqreturn_t handle_global_irq(int irq, void *data)
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				ret = IRQ_WAKE_THREAD;
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				ret = IRQ_WAKE_THREAD;
				break;
			default:
				PCIE_DUMP(dev,
@@ -5523,9 +5481,10 @@ static irqreturn_t handle_global_irq(int irq, void *data)
		}
	}

done:
	spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);

	return IRQ_HANDLED;
	return ret;
}

static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
@@ -5544,10 +5503,11 @@ static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
	}

	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		rc = devm_request_irq(pdev,
		rc = devm_request_threaded_irq(pdev,
				dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
				handle_global_irq,
				IRQF_TRIGGER_RISING,
				handle_aer_irq,
				IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
				dev);
		if (rc) {
@@ -6390,9 +6350,6 @@ static int msm_pcie_probe(struct platform_device *pdev)
		pcie_dev->pcidev_table[i].short_bdf = 0;
		pcie_dev->pcidev_table[i].sid = 0;
		pcie_dev->pcidev_table[i].domain = rc_idx;
		pcie_dev->pcidev_table[i].conf_base = NULL;
		pcie_dev->pcidev_table[i].phy_address = 0;
		pcie_dev->pcidev_table[i].dev_ctrlstts_offset = 0;
		pcie_dev->pcidev_table[i].event_reg = NULL;
		pcie_dev->pcidev_table[i].registered = true;
	}