Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9b43c390 authored by Suraj Jaiswal, committed by Gerrit - the friendly Code Review server
Browse files

net: stmmac: Add support for smmu enablement



Add smmu enablement and smmu mapping for ethernet.

Change-Id: I4cc2a1834cdeb9adf25a735bf41197be70a29a3b
Acked-by: Nagarjuna Chaganti <nchagant@qti.qualcomm.com>
Signed-off-by: Suraj Jaiswal <jsuraj@codeaurora.org>
parent 6093778c
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -33,10 +33,10 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)

	len = nopaged_len - bmax;

	des2 = dma_map_single(priv->device, skb->data,
	des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
			      bmax, DMA_TO_DEVICE);
	desc->des2 = cpu_to_le32(des2);
	if (dma_mapping_error(priv->device, des2))
	if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
		return -1;
	tx_q->tx_skbuff_dma[entry].buf = des2;
	tx_q->tx_skbuff_dma[entry].len = bmax;
@@ -50,11 +50,11 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
		desc = tx_q->dma_tx + entry;

		if (len > bmax) {
			des2 = dma_map_single(priv->device,
			des2 = dma_map_single(GET_MEM_PDEV_DEV,
					      (skb->data + bmax * i),
					      bmax, DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
			if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = bmax;
@@ -63,11 +63,11 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
			len -= bmax;
			i++;
		} else {
			des2 = dma_map_single(priv->device,
			des2 = dma_map_single(GET_MEM_PDEV_DEV,
					      (skb->data + bmax * i), len,
					      DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
			if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = len;
+81 −6
Original line number Diff line number Diff line
@@ -16,6 +16,9 @@
#include <linux/ipc_logging.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/dma-iommu.h>
#include <linux/iommu.h>


#include "stmmac.h"
#include "stmmac_platform.h"
@@ -101,6 +104,8 @@

bool phy_intr_en;

struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx = {0};

/* Read a 32-bit register from the RGMII block at the given byte offset. */
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
{
	return readl(ethqos->rgmii_base + offset);
}
@@ -598,6 +603,64 @@ static int ethqos_phy_intr_enable(struct qcom_ethqos *ethqos)
	return ret;
}

/*
 * OF match table. The "qcom,emac-smmu-embedded" entry matches the SMMU
 * context-bank child node: qcom_ethqos_probe() routes that compatible to
 * emac_emb_smmu_cb_probe() instead of the normal EMAC probe path, and the
 * master probe passes this table to of_platform_populate() to create it.
 */
static const struct of_device_id qcom_ethqos_match[] = {
	{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
	{ .compatible = "qcom,sdxprairie-ethqos", .data = &emac_v2_3_2_por},
	{ .compatible = "qcom,stmmac-ethqos", },
	{ .compatible = "qcom,emac-smmu-embedded", },
	{ }
};

/*
 * Tear down the cached SMMU context-bank state. Clearing 'valid' first
 * makes GET_MEM_PDEV_DEV fall back to priv->device before the stale
 * pointers below are dropped.
 */
static void emac_emb_smmu_exit(void)
{
	emac_emb_smmu_ctx.valid = false;
	emac_emb_smmu_ctx.iommu_domain = NULL;
	emac_emb_smmu_ctx.smmu_pdev = NULL;
	emac_emb_smmu_ctx.pdev_master = NULL;
}

/*
 * Probe for the "qcom,emac-smmu-embedded" child device (the SMMU context
 * bank).  Validates the DT IOVA pool, applies a 32-bit DMA mask, caches the
 * platform device and its IOMMU domain in emac_emb_smmu_ctx, and records the
 * outcome in emac_emb_smmu_ctx.ret so the master probe can detect failure
 * after of_platform_populate().
 *
 * Returns 0 on success or a negative errno.
 */
static int emac_emb_smmu_cb_probe(struct platform_device *pdev)
{
	int result;
	u32 iova_ap_mapping[2];
	struct device *dev = &pdev->dev;

	ETHQOSDBG("EMAC EMB SMMU CB probe: smmu pdev=%p\n", pdev);

	/* Validate the IOVA pool property; the range itself is consumed by
	 * the IOMMU layer from the DT, we only check it is present/sane.
	 */
	result = of_property_read_u32_array(dev->of_node,
					    "qcom,iommu-dma-addr-pool",
					    iova_ap_mapping,
					    ARRAY_SIZE(iova_ap_mapping));
	if (result) {
		ETHQOSERR("Failed to read EMB start/size iova addresses\n");
		/* Record the failure so the master probe's
		 * emac_emb_smmu_ctx.ret check can see it.
		 */
		emac_emb_smmu_ctx.ret = result;
		return result;
	}

	emac_emb_smmu_ctx.smmu_pdev = pdev;

	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
		ETHQOSERR("DMA set 32bit mask failed\n");
		result = -EOPNOTSUPP;
		emac_emb_smmu_ctx.ret = result;
		return result;
	}

	/* From here on GET_MEM_PDEV_DEV resolves to the SMMU CB device. */
	emac_emb_smmu_ctx.valid = true;

	emac_emb_smmu_ctx.iommu_domain =
		iommu_get_domain_for_dev(&emac_emb_smmu_ctx.smmu_pdev->dev);

	ETHQOSINFO("Successfully attached to IOMMU\n");

	emac_emb_smmu_ctx.ret = result;
	return result;
}

static int qcom_ethqos_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
@@ -608,6 +671,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
	struct resource *res;
	int ret;

	if (of_device_is_compatible(pdev->dev.of_node,
				    "qcom,emac-smmu-embedded"))
		return emac_emb_smmu_cb_probe(pdev);
	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;
@@ -678,6 +744,20 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
	}
	ETHQOSDBG(": emac_core_version = %d\n", ethqos->emac_ver);

	if (of_property_read_bool(pdev->dev.of_node, "qcom,arm-smmu")) {
		emac_emb_smmu_ctx.pdev_master = pdev;
		ret = of_platform_populate(pdev->dev.of_node,
					   qcom_ethqos_match, NULL, &pdev->dev);
		if (ret)
			ETHQOSERR("Failed to populate EMAC platform\n");
		if (emac_emb_smmu_ctx.ret) {
			ETHQOSERR("smmu probe failed\n");
			of_platform_depopulate(&pdev->dev);
			ret = emac_emb_smmu_ctx.ret;
			emac_emb_smmu_ctx.ret = 0;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	if (ret)
		goto err_clk;
@@ -711,17 +791,12 @@ static int qcom_ethqos_remove(struct platform_device *pdev)

	if (phy_intr_en)
		free_irq(ethqos->phy_intr, ethqos);
	emac_emb_smmu_exit();
	ethqos_disable_regulators(ethqos);

	return ret;
}

static const struct of_device_id qcom_ethqos_match[] = {
	{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
	{ .compatible = "qcom,sdxprairie-ethqos", .data = &emac_v2_3_2_por},
	{ .compatible = "qcom,stmmac-ethqos", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);

static struct platform_driver qcom_ethqos_driver = {
+6 −6
Original line number Diff line number Diff line
@@ -37,10 +37,10 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)

	if (nopaged_len > BUF_SIZE_8KiB) {

		des2 = dma_map_single(priv->device, skb->data, bmax,
		des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data, bmax,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
		if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
			return -1;

		tx_q->tx_skbuff_dma[entry].buf = des2;
@@ -58,10 +58,10 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
		else
			desc = tx_q->dma_tx + entry;

		des2 = dma_map_single(priv->device, skb->data + bmax, len,
		des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data + bmax, len,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
		if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = len;
@@ -72,10 +72,10 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
				STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
				skb->len);
	} else {
		des2 = dma_map_single(priv->device, skb->data,
		des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
		if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
+17 −0
Original line number Diff line number Diff line
@@ -243,6 +243,23 @@ enum stmmac_state {
	STMMAC_SERVICE_SCHED,
};

/*
 * State for the embedded SMMU context bank used by the EMAC.  Filled in by
 * the SMMU CB probe; a single global instance (emac_emb_smmu_ctx) is shared
 * by the platform glue and the stmmac core via GET_MEM_PDEV_DEV.
 */
struct emac_emb_smmu_cb_ctx {
	bool valid;			/* SMMU CB probed; use smmu_pdev for DMA */
	struct platform_device *pdev_master;	/* EMAC master platform device */
	struct platform_device *smmu_pdev;	/* SMMU context-bank child device */
	struct dma_iommu_mapping *mapping;
	struct iommu_domain *iommu_domain;	/* from iommu_get_domain_for_dev() */
	u32 va_start;			/* IOVA pool start — TODO confirm who sets these */
	u32 va_size;
	u32 va_end;
	int ret;			/* CB probe result, checked by master probe */
};

extern struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx;

/*
 * Device to use for DMA mapping/allocation: the SMMU CB device when the
 * context bank is up, otherwise the EMAC device.  NOTE: unhygienic macro —
 * it expands 'priv', so it is only usable where a stmmac 'priv' pointer
 * with a ->device member is in scope.
 */
#define GET_MEM_PDEV_DEV (emac_emb_smmu_ctx.valid ? \
			&emac_emb_smmu_ctx.smmu_pdev->dev : priv->device)

extern bool phy_intr_en;
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
+29 −28
Original line number Diff line number Diff line
@@ -1276,12 +1276,12 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
			dma_unmap_page(GET_MEM_PDEV_DEV,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
			dma_unmap_single(GET_MEM_PDEV_DEV,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
@@ -1497,11 +1497,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
			dma_free_coherent(GET_MEM_PDEV_DEV,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
			dma_free_coherent(GET_MEM_PDEV_DEV, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

@@ -1529,11 +1529,11 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
			dma_free_coherent(GET_MEM_PDEV_DEV,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
			dma_free_coherent(GET_MEM_PDEV_DEV, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

@@ -1569,8 +1569,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
		pp_params.pool_size = DMA_RX_SIZE;
		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
		pp_params.order = ilog2(num_pages);
		pp_params.nid = dev_to_node(priv->device);
		pp_params.dev = priv->device;
		pp_params.nid = dev_to_node(GET_MEM_PDEV_DEV);
		pp_params.dev = GET_MEM_PDEV_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		rx_q->page_pool = page_pool_create(&pp_params);
@@ -1586,7 +1586,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
			rx_q->dma_erx = dma_alloc_coherent(GET_MEM_PDEV_DEV,
							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
@@ -1594,7 +1594,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
			rx_q->dma_rx = dma_alloc_coherent(GET_MEM_PDEV_DEV,
							  DMA_RX_SIZE * sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
@@ -1645,14 +1645,14 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
			goto err_dma;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_alloc_coherent(priv->device,
			tx_q->dma_etx = dma_alloc_coherent(GET_MEM_PDEV_DEV,
							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_alloc_coherent(priv->device,
			tx_q->dma_tx = dma_alloc_coherent(GET_MEM_PDEV_DEV,
							  DMA_TX_SIZE * sizeof(struct dma_desc),
							  &tx_q->dma_tx_phy,
							  GFP_KERNEL);
@@ -1927,12 +1927,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
				dma_unmap_page(GET_MEM_PDEV_DEV,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
				dma_unmap_single(GET_MEM_PDEV_DEV,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
@@ -2987,9 +2987,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
	des = dma_map_single(GET_MEM_PDEV_DEV, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
	if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
@@ -3017,10 +3017,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
		des = skb_frag_dma_map(GET_MEM_PDEV_DEV, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
		if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
@@ -3217,9 +3217,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
		des = skb_frag_dma_map(GET_MEM_PDEV_DEV, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
		if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;
@@ -3306,9 +3306,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
		des = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
		if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;
@@ -3424,7 +3424,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);

			dma_sync_single_for_device(priv->device, buf->sec_addr,
			dma_sync_single_for_device(GET_MEM_PDEV_DEV,
						   buf->sec_addr,
						   len, DMA_FROM_DEVICE);
		}

@@ -3433,7 +3434,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
		/* Sync whole allocation to device. This will invalidate old
		 * data.
		 */
		dma_sync_single_for_device(priv->device, buf->addr, len,
		dma_sync_single_for_device(GET_MEM_PDEV_DEV, buf->addr, len,
					   DMA_FROM_DEVICE);

		stmmac_set_desc_addr(priv, p, buf->addr);
@@ -3574,7 +3575,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
			    unlikely(status != llc_snap))
				len -= ETH_FCS_LEN;
		}

		if (!skb) {
			int ret = stmmac_get_rx_header_len(priv, p, &hlen);

@@ -3595,7 +3595,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
				continue;
			}

			dma_sync_single_for_cpu(priv->device, buf->addr, len,
			dma_sync_single_for_cpu(GET_MEM_PDEV_DEV,
						buf->addr, len,
						DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, page_address(buf->page),
						len);
@@ -3610,7 +3611,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
			if (likely(status & rx_not_ls))
				buf_len = priv->dma_buf_sz;

			dma_sync_single_for_cpu(priv->device, buf->addr,
			dma_sync_single_for_cpu(GET_MEM_PDEV_DEV, buf->addr,
						buf_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, 0, buf_len,
@@ -3622,7 +3623,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
		}

		if (sec_len > 0) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
			dma_sync_single_for_cpu(GET_MEM_PDEV_DEV, buf->sec_addr,
						sec_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, sec_len,