
Commit a0ce9b1e authored by David Daney

netdev: octeon_mgmt: Make multi-line comment style consistent.



No code changes.  Recent patches have used the netdev style multi-line
comment formatting, making the style inconsistent within octeon_mgmt.c.

Update the remaining comment blocks to achieve style harmony.

Signed-off-by: David Daney <david.daney@cavium.com>
Acked-by: David S. Miller <davem@davemloft.net>
parent e96f7515
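
For reference, the difference between the two conventions is only where the comment text begins: the older kernel style opens with a bare "/*" and starts the text on the next line, while the netdev style starts the text on the opening line. A minimal illustrative sketch (not taken from the patch itself):

	/*
	 * Old style: the text begins on the line after the opening.
	 */

	/* netdev style: the text begins on the opening line,
	 * and continuation lines stay aligned under it.
	 */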
+9 −18
@@ -34,8 +34,7 @@
 
 #define OCTEON_MGMT_NAPI_WEIGHT 16
 
-/*
- * Ring sizes that are powers of two allow for more efficient modulo
+/* Ring sizes that are powers of two allow for more efficient modulo
  * opertions.
  */
 #define OCTEON_MGMT_RX_RING_SIZE 512
@@ -431,8 +430,7 @@ static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
 		netif_receive_skb(skb);
 		rc = 0;
 	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
-		/*
-		 * Packet split across skbs.  This can happen if we
+		/* Packet split across skbs.  This can happen if we
 		 * increase the MTU.  Buffers that are already in the
 		 * rx ring can then end up being too small.  As the rx
 		 * ring is refilled, buffers sized for the new MTU
@@ -462,8 +460,7 @@ static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
 	} else {
 		/* Some other error, discard it. */
 		dev_kfree_skb_any(skb);
-		/*
-		 * Error statistics are accumulated in
+		/* Error statistics are accumulated in
 		 * octeon_mgmt_update_rx_stats.
 		 */
 	}
@@ -590,8 +587,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
 		cam_mode = 0;
 		available_cam_entries = 8;
 	} else {
-		/*
-		 * One CAM entry for the primary address, leaves seven
+		/* One CAM entry for the primary address, leaves seven
 		 * for the secondary addresses.
 		 */
 		available_cam_entries = 7 - netdev->uc.count;
@@ -663,8 +659,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 	struct octeon_mgmt *p = netdev_priv(netdev);
 	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
 
-	/*
-	 * Limit the MTU to make sure the ethernet packets are between
+	/* Limit the MTU to make sure the ethernet packets are between
 	 * 64 bytes and 16383 bytes.
 	 */
 	if (size_without_fcs < 64 || size_without_fcs > 16383) {
@@ -1044,8 +1039,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	}
 	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
 		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
-		/*
-		 * Force compensation values, as they are not
+		/* Force compensation values, as they are not
 		 * determined properly by HW
 		 */
 		union cvmx_agl_gmx_drv_ctl drv_ctl;
@@ -1078,8 +1072,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
 
 	octeon_mgmt_change_mtu(netdev, netdev->mtu);
 
-	/*
-	 * Enable the port HW. Packets are not allowed until
+	/* Enable the port HW. Packets are not allowed until
 	 * cvmx_mgmt_port_enable() is called.
 	 */
 	mix_ctl.u64 = 0;
@@ -1196,8 +1189,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	rxx_frm_ctl.u64 = 0;
 	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
 	rxx_frm_ctl.s.pre_align = 1;
-	/*
-	 * When set, disables the length check for non-min sized pkts
+	/* When set, disables the length check for non-min sized pkts
 	 * with padding in the client data.
 	 */
 	rxx_frm_ctl.s.pad_len = 1;
@@ -1215,8 +1207,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	rxx_frm_ctl.s.ctl_drp = 1;
 	/* Strip off the preamble */
 	rxx_frm_ctl.s.pre_strp = 1;
-	/*
-	 * This port is configured to send PREAMBLE+SFD to begin every
+	/* This port is configured to send PREAMBLE+SFD to begin every
 	 * frame.  GMX checks that the PREAMBLE is sent correctly.
 	 */
 	rxx_frm_ctl.s.pre_chk = 1;