Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 916035dd authored by David S. Miller
Browse files

Merge branch 'mlx4-vf-counters'



Or Gerlitz says:

====================
mlx4 driver update (+ new VF ndo)

This series from Eran and Hadar is further dealing with traffic
counters in the mlx4 driver, this time mostly around SRIOV.

We added a new ndo to read the VF counters through the PF netdev
netlink infrastructure plus mlx4 implementation for that ndo.

changes from V0:
  - applied feedback from John to use nested netlink encoding
    for the VF counters so we can extend it later
  - add handling of single ported VFs in the mlx4_en driver new ndo
  - avoid chopping the FW counters from 64 to 32 bits in mlx4_en PF flow
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents b4ad7baa 62a89055
Loading
Loading
Loading
Loading
+12 −26
Original line number Diff line number Diff line
@@ -64,14 +64,6 @@ enum {
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

/* Counters should saturate once they reach their maximum value */
#define ASSIGN_32BIT_COUNTER(counter, value) do {\
	if ((value) > U32_MAX)			 \
		counter = cpu_to_be32(U32_MAX); \
	else					 \
		counter = cpu_to_be32(value);	 \
} while (0)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
@@ -828,30 +820,24 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_counter counter_stats;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	memset(&counter_stats, 0, sizeof(counter_stats));
	err = mlx4_get_counter_stats(dev->dev,
				     dev->counters[port_num - 1].index,
				     &counter_stats, 0);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
		switch (mode & 0xf) {
		switch (counter_stats.counter_mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
			edit_counter(&counter_stats,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
@@ -860,8 +846,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}

@@ -869,8 +853,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (!mlx4_is_slave(dev->dev))
			return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
					      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
+30 −13
Original line number Diff line number Diff line
@@ -2098,6 +2098,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;
	int allocated;
	u32 counter_index;

	pr_info_once("%s", mlx4_ib_version);

@@ -2263,19 +2265,31 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
			/* if failed to allocate a new counter, use default */
			if (err)
				ibdev->counters[i] = -1;
		} else {
			ibdev->counters[i] = -1;
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		ibdev->counters[i].index = counter_index;
		ibdev->counters[i].allocated = allocated;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports ; ++i)
			ibdev->counters[i] = ibdev->counters[0];

		for (i = 1; i < ibdev->num_ports ; ++i) {
			ibdev->counters[i].index = ibdev->counters[0].index;
			ibdev->counters[i].allocated = 0;
		}

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;
@@ -2415,10 +2429,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

	for (i = 0; i < ibdev->num_ports; ++i) {
		if (ibdev->counters[i].index != -1 &&
		    ibdev->counters[i].allocated)
			mlx4_counter_free(ibdev->dev,
					  ibdev->counters[i].index);
	}
err_map:
	iounmap(ibdev->uar_map);

@@ -2535,8 +2551,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
		if (ibdev->counters[p].index != -1 &&
		    ibdev->counters[p].allocated)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

+6 −1
Original line number Diff line number Diff line
@@ -503,6 +503,11 @@ struct mlx4_ib_iov_port {
	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};

/* Per-port traffic-counter bookkeeping: which HW counter index a port uses
 * and whether this driver allocated it (and so must free it on teardown;
 * default/shared counters are never freed here).
 */
struct counter_index {
	u32		index;		/* mlx4 HW counter index for this port */
	u8		allocated;	/* 1 if allocated by this driver, 0 if default */
};

struct mlx4_ib_dev {
	struct ib_device	ib_dev;
	struct mlx4_dev	       *dev;
@@ -521,7 +526,7 @@ struct mlx4_ib_dev {
	struct mutex		cap_mask_mutex;
	bool			ib_active;
	struct mlx4_ib_iboe	iboe;
	int			counters[MLX4_MAX_PORTS];
	struct counter_index    counters[MLX4_MAX_PORTS];
	int		       *eq_table;
	struct kobject	       *iov_parent;
	struct kobject	       *ports_parent;
+4 −3
Original line number Diff line number Diff line
@@ -1539,12 +1539,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if (dev->counters[qp->port - 1] != -1) {
		if (dev->counters[qp->port - 1].index != -1) {
			context->pri_path.counter_index =
						dev->counters[qp->port - 1];
					dev->counters[qp->port - 1].index;
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
		} else
			context->pri_path.counter_index = 0xff;
			context->pri_path.counter_index =
				MLX4_SINK_COUNTER_INDEX(dev->dev);

		if (qp->flags & MLX4_IB_QP_NETIF) {
			mlx4_ib_steer_qp_reg(dev, qp, 1);
+87 −0
Original line number Diff line number Diff line
@@ -49,6 +49,7 @@
#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
#include "mlx4_stats.h"

#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL
@@ -3166,6 +3167,92 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);

/*
 * mlx4_get_counter_stats() - read one HW traffic counter via QUERY_IF_STAT.
 * @dev:           mlx4 device
 * @counter_index: firmware counter index to query
 * @counter_stats: output/accumulator; in basic mode (0) the frame/byte totals
 *                 read from firmware are ADDED to the values already present,
 *                 so callers should pre-zero the struct before the first call
 * @reset:         non-zero to also clear the counter in firmware
 *
 * Returns 0 on success (the sink counter always reads as success with no
 * data), a PTR_ERR from mailbox allocation, or the command error code.
 */
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
			   struct mlx4_counter *counter_stats, int reset)
{
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_counter *hw;
	u32 in_mod = counter_index;
	int err;

	if (!counter_stats)
		return -EINVAL;

	/* The sink counter carries no statistics; nothing to query. */
	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	memset(mbox->buf, 0, sizeof(struct mlx4_counter));
	if (reset)
		in_mod |= MLX4_QUERY_IF_STAT_RESET;

	err = mlx4_cmd_box(dev, 0, mbox->dma, in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT,
			   MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err) {
		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
			 __func__, counter_index);
		goto out;
	}

	hw = (struct mlx4_counter *)mbox->buf;
	counter_stats->counter_mode = hw->counter_mode;
	/* Only basic mode (0) reports the frame/byte totals below. */
	if (counter_stats->counter_mode == 0) {
		/* Values are big-endian on the wire; accumulate in CPU order. */
		counter_stats->rx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
				    be64_to_cpu(hw->rx_frames));
		counter_stats->tx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
				    be64_to_cpu(hw->tx_frames));
		counter_stats->rx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
				    be64_to_cpu(hw->rx_bytes));
		counter_stats->tx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
				    be64_to_cpu(hw->tx_bytes));
	}

out:
	mlx4_free_cmd_mailbox(dev, mbox);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);

/*
 * mlx4_get_vf_stats() - report one VF's traffic counters for the
 * ndo_get_vf_stats netlink path. PF (master) only.
 * @dev:      mlx4 device
 * @port:     one-based port number (remapped to the slave's closest port)
 * @vf_idx:   VF index as seen by the PF netdev
 * @vf_stats: output; filled only when the counter is in basic mode (0)
 *
 * Returns 0 on success, -EINVAL on bad arguments or unknown VF,
 * -EPROTONOSUPPORT when called on a function that is not the master.
 */
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
		      struct ifla_vf_stats *vf_stats)
{
	struct mlx4_counter tmp_vf_stats;
	int slave;
	int err = 0;

	if (!vf_stats)
		return -EINVAL;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf_idx);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	/* Zero before use: the counter-read path (mlx4_get_counter_stats)
	 * accumulates into this struct rather than overwriting it, so stack
	 * garbage here would corrupt the reported totals.
	 */
	memset(&tmp_vf_stats, 0, sizeof(tmp_vf_stats));
	err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
	if (!err && tmp_vf_stats.counter_mode == 0) {
		/* Wire format is big-endian; ifla_vf_stats wants CPU order. */
		vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
		vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
		vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
		vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);

int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
Loading