Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d71a756a authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'dsa-ACB-for-bcm_sf2-and-bcmsysport'



Florian Fainelli says:

====================
Enable ACB for bcm_sf2 and bcmsysport

This patch series enables Broadcom's Advanced Congestion Buffering mechanism
which requires cooperation between the CPU/Management Ethernet MAC controller
and the switch.

I took the notifier approach because ultimately the information we need to
carry to the master network device is DSA specific and I saw little room for
generalizing beyond what DSA requires. Chances are that this is highly specific
to the Broadcom HW as I don't know of any HW out there that supports something
nearly similar for similar or identical needs.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3f7832c2 723934fb
Loading
Loading
Loading
Loading
+30 −0
Original line number Diff line number Diff line
@@ -205,6 +205,19 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}

@@ -613,6 +626,20 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
		status->pause = 1;
}

/* Globally enable Advanced Congestion Buffering (ACB) on the switch.
 * Called from both the setup and resume paths so the configuration
 * survives a suspend/resume cycle.  The write ordering matters: the
 * flush bits are pulsed first, then cleared as the enable bits are set.
 */
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	/* First write: assert the flush field to drain any stale state */
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	/* Second write: deassert flush and turn on the feature.
	 * NOTE(review): ACB_ALGORITHM selects the buffering algorithm;
	 * exact semantics are HW-specific — confirm against the datasheet.
	 */
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -655,6 +682,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
			bcm_sf2_imp_setup(ds, port);
	}

	bcm_sf2_enable_acb(ds);

	return 0;
}

@@ -766,6 +795,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
	}

	bcm_sf2_sw_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}
+23 −0
Original line number Diff line number Diff line
@@ -115,6 +115,24 @@ enum bcm_sf2_reg_offs {
#define P7_IRQ_OFF			0
#define P_IRQ_OFF(x)			((6 - (x)) * P_NUM_IRQ)

/* Register set relative to 'ACB' */
#define ACB_CONTROL			0x00
#define  ACB_EN				(1 << 0)	/* global ACB enable */
#define  ACB_ALGORITHM			(1 << 1)	/* algorithm select — semantics HW-specific */
#define  ACB_FLUSH_SHIFT		2
#define  ACB_FLUSH_MASK			0x3		/* 2-bit flush control field */

/* One configuration register per (port, queue) pair; see ACB_QUEUE_CFG() */
#define ACB_QUEUE_0_CFG			0x08
#define  XOFF_THRESHOLD_MASK		0x7ff		/* 11-bit field, bits 10:0 */
#define  XON_EN				(1 << 11)
#define  TOTAL_XOFF_THRESHOLD_SHIFT	12
#define  TOTAL_XOFF_THRESHOLD_MASK	0x7ff		/* 11-bit field, bits 22:12 */
#define  TOTAL_XOFF_EN			(1 << 23)
#define  TOTAL_XON_EN			(1 << 24)
#define  PKTLEN_SHIFT			25
#define  PKTLEN_MASK			0x3f		/* 6-bit field, bits 30:25 */
#define ACB_QUEUE_CFG(x)		(ACB_QUEUE_0_CFG + ((x) * 0x4))

/* Register set relative to 'CORE' */
#define CORE_G_PCTL_PORT0		0x00000
#define CORE_G_PCTL_PORT(x)		(CORE_G_PCTL_PORT0 + (x * 0x4))
@@ -237,6 +255,11 @@ enum bcm_sf2_reg_offs {
#define CORE_PORT_VLAN_CTL_PORT(x)	(0xc400 + ((x) * 0x8))
#define  PORT_VLAN_CTRL_MASK		0x1ff

/* Per-port TX queue pause thresholds; registers are 0x8 apart per port */
#define CORE_TXQ_THD_PAUSE_QN_PORT_0	0x2c80
#define  TXQ_PAUSE_THD_MASK		0x7ff		/* 11-bit threshold */
#define CORE_TXQ_THD_PAUSE_QN_PORT(x)	(CORE_TXQ_THD_PAUSE_QN_PORT_0 + \
					(x) * 0x8)

#define CORE_DEFAULT_1Q_TAG_P(x)	(0xd040 + ((x) * 8))
#define  CFI_SHIFT			12
#define  PRI_SHIFT			13
+115 −4
Original line number Diff line number Diff line
@@ -1416,9 +1416,20 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	reg |= ring->switch_queue & RING_QID_MASK;
	reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
@@ -1447,8 +1458,9 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);
		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
		  ring->size, ring->desc_cpu, ring->switch_queue,
		  ring->switch_port);

	return 0;
}
@@ -2011,6 +2023,92 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
};

/* TX queue selection hook.  When a DSA switch hangs off this master
 * device, the tagging layer has already encoded the destination switch
 * port and queue into the skb's queue mapping; decode it and return the
 * index of the local TX ring that was reserved for that (port, queue)
 * pair in bcm_sysport_map_queues().  Otherwise defer to the stack's
 * fallback selection.
 */
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    void *accel_priv,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int switch_queue, switch_port;
	u16 qmap;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb);

	/* DSA tagging layer will have configured the correct queue */
	qmap = skb_get_queue_mapping(skb);
	switch_queue = BRCM_TAG_GET_QUEUE(qmap);
	switch_port = BRCM_TAG_GET_PORT(qmap);

	return priv->ring_map[switch_queue +
			      switch_port * priv->per_port_num_tx_queues]->index;
}

/* Record the mapping between a directly-attached switch port's egress
 * queues and the SYSTEMPORT TX rings backing them.  Invoked from the
 * DSA notifier when a slave network device registers; the actual HW
 * programming happens later, in bcm_sysport_init_tx_ring().
 *
 * Returns 0 (including the ignored non-directly-attached switch case).
 */
static int bcm_sysport_map_queues(struct net_device *dev,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *ring;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, start, port;

	/* We can't be setting up queue inspection for non directly attached
	 * switches
	 */
	if (info->switch_number)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
	 * 1:1 mapping, we can only do a 2:1 mapping.  By reducing the number
	 * of per-port (slave_dev) network device queues we achieve just that.
	 * This needs to happen now, before any slave network device is used,
	 * so that it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);
	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	/* Claim the first free contiguous run of local TX rings */
	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
	for (q = 0; q < num_tx_queues; q++) {
		ring = &priv->tx_rings[q + start];

		/* Just remember the mapping; the actual programming is done
		 * during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = q;
		ring->switch_port = port;
		priv->ring_map[q + port * num_tx_queues] = ring;

		/* Set all queues as being used now */
		set_bit(q + start, &priv->queue_bitmap);
	}

	return 0;
}

/* DSA notifier callback: on slave port registration, set up the queue
 * mapping between the switch port and this master's TX rings.  All
 * other events are ignored.
 */
static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info = ptr;

	if (event != DSA_PORT_REGISTER)
		return NOTIFY_DONE;

	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
}

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
@@ -2023,6 +2121,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

#define REV_FMT	"v%2x.%02x"
@@ -2172,10 +2271,18 @@ static int bcm_sysport_probe(struct platform_device *pdev)

	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_fixed_link;
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -2188,6 +2295,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
@@ -2199,11 +2308,13 @@ static int bcm_sysport_probe(struct platform_device *pdev)
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
+10 −1
Original line number Diff line number Diff line
@@ -404,7 +404,7 @@ struct bcm_rsb {
#define  RING_CONS_INDEX_MASK		0xffff

#define RING_MAPPING			0x14
#define  RING_QID_MASK			0x3
#define  RING_QID_MASK			0x7
#define  RING_PORT_ID_SHIFT		3
#define  RING_PORT_ID_MASK		0x7
#define  RING_IGNORE_STATUS		(1 << 6)
@@ -712,6 +712,8 @@ struct bcm_sysport_tx_ring {
	struct bcm_sysport_priv *priv;	/* private context backpointer */
	unsigned long	packets;	/* packets statistics */
	unsigned long	bytes;		/* bytes statistics */
	unsigned int	switch_queue;	/* switch port queue number */
	unsigned int	switch_port;	/* switch port number */
};

/* Driver private structure */
@@ -765,5 +767,12 @@ struct bcm_sysport_priv {

	/* For atomic update generic 64bit value on 32bit Machine */
	struct u64_stats_sync	syncp;

	/* map information between switch port queues and local queues */
	struct notifier_block	dsa_notifier;
	unsigned int		per_port_num_tx_queues;
	unsigned long		queue_bitmap;
	struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];

};
#endif /* __BCM_SYSPORT_H */
+50 −0
Original line number Diff line number Diff line
@@ -471,4 +471,54 @@ static inline int dsa_switch_resume(struct dsa_switch *ds)
}
#endif /* CONFIG_PM_SLEEP */

/* Events delivered through the DSA notifier chain */
enum dsa_notifier_type {
	DSA_PORT_REGISTER,	/* a DSA slave port net_device appeared */
	DSA_PORT_UNREGISTER,	/* a DSA slave port net_device went away */
};

/* Common payload header for all DSA notifier events; embedded first in
 * the event-specific structures so it can be recovered from the generic
 * notifier pointer.
 */
struct dsa_notifier_info {
	struct net_device *dev;
};

/* Payload for DSA_PORT_REGISTER / DSA_PORT_UNREGISTER events */
struct dsa_notifier_register_info {
	struct dsa_notifier_info info;	/* must be first */
	struct net_device *master;	/* CPU/management port net_device */
	unsigned int port_number;	/* port index on the switch */
	unsigned int switch_number;	/* non-zero when the switch is not
					 * directly attached to the CPU port
					 */
};

/* Recover the net_device carried by a DSA notifier payload; works for
 * any structure embedding struct dsa_notifier_info as its first member.
 */
static inline struct net_device *
dsa_notifier_info_to_dev(const struct dsa_notifier_info *info)
{
	return info->dev;
}

#if IS_ENABLED(CONFIG_NET_DSA)
int register_dsa_notifier(struct notifier_block *nb);
int unregister_dsa_notifier(struct notifier_block *nb);
int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info);
#else
/* No-op stubs so callers (e.g. Ethernet MAC drivers cooperating with a
 * DSA switch) build unchanged when DSA support is compiled out.
 */
static inline int register_dsa_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_dsa_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
				     struct dsa_notifier_info *info)
{
	return NOTIFY_DONE;
}
#endif

/* Broadcom tag specific helpers to insert and extract queue/port number */
#define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
#define BRCM_TAG_GET_PORT(v)		((v) >> 8)
#define BRCM_TAG_GET_QUEUE(v)		((v) & 0xff)

#endif
Loading