Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5600a410 authored by Jason Gunthorpe
Browse files

Merge mlx5-next into rdma for-next

From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Required for dependencies in the next patches.

* mlx5-next:
  net/mlx5: Add rts2rts_qp_counters_set_id field in hca cap
  net/mlx5: Properly name the generic WQE control field
  net/mlx5: Introduce TLS TX offload hardware bits and structures
  net/mlx5: Refactor mlx5_esw_query_functions for modularity
  net/mlx5: E-Switch prepare functions change handler to be modular
  net/mlx5: Introduce and use mlx5_eswitch_get_total_vports()
parents bcde9a83 f8efee08
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -29,7 +29,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	int num_ports = MLX5_TOTAL_VPORTS(dev);
	int num_ports = mlx5_eswitch_get_total_vports(dev);
	const struct mlx5_ib_profile *profile;
	struct mlx5_ib_dev *ibdev;
	int vport_index;
+33 −9
Original line number Diff line number Diff line
@@ -1715,14 +1715,34 @@ static int eswitch_vport_event(struct notifier_block *nb,
	return NOTIFY_OK;
}

int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns functions changed
 * raw output memory pointer from device on success. Otherwise returns ERR_PTR.
 * Caller must free the memory using kvfree() when valid pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}

static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
@@ -1868,14 +1888,16 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	int total_vports = MLX5_TOTAL_VPORTS(dev);
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int total_vports;
	int err, i;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 total_vports,
@@ -2525,8 +2547,7 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,

void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
	int err;
	const u32 *out;

	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);

@@ -2535,8 +2556,11 @@ void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
		return;
	}

	err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
	if (!err)
	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}
+3 −4
Original line number Diff line number Diff line
@@ -403,7 +403,7 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen);
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

@@ -560,10 +560,9 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { ret
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline int
mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return -EOPNOTSUPP;
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
+28 −18
Original line number Diff line number Diff line
@@ -1395,7 +1395,7 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
	int total_vports = esw->total_vports;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
@@ -2047,38 +2047,48 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
	esw_destroy_offloads_acl_tables(esw);
}

static void esw_functions_changed_event_handler(struct work_struct *work)
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	bool host_pf_disabled;
	u16 num_vfs = 0;
	int err;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;
	u16 new_num_vfs;

	err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
	num_vfs = MLX5_GET(query_esw_functions_out, out,
	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);
	if (err || host_pf_disabled || num_vfs == esw->esw_funcs.num_vfs)
		goto out;

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
	} else {
		err = esw_offloads_load_vf_reps(esw, num_vfs);
		int err;

		err = esw_offloads_load_vf_reps(esw, new_num_vfs);
		if (err)
			goto out;
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

	esw->esw_funcs.num_vfs = num_vfs;
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}
+15 −11
Original line number Diff line number Diff line
@@ -2092,7 +2092,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
	if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
		return NULL;

	switch (type) {
@@ -2423,7 +2423,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
@@ -2438,7 +2438,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
@@ -2606,16 +2606,18 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
	steering->esw_egress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_egress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
@@ -2634,16 +2636,18 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
	steering->esw_ingress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_ingress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
Loading