Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a06ebb8d authored by Saeed Mahameed
Browse files


Merge mlx5-next patches needed for upcoming mlx5 software steering.

1) Alex adds HW bits and definitions required for SW steering
2) Ariel moves device memory management to mlx5_core (From mlx5_ib)
3) Maor, Cleanups and fixups for eswitch mode and RoCE
4) Mark, Set only stag for match untagged packets

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parents 4bc61b0b fc603294
Loading
Loading
Loading
Loading
+0 −130
Original line number Diff line number Diff line
@@ -186,136 +186,6 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
	return err;
}

/*
 * mlx5_cmd_alloc_sw_icm() - reserve a contiguous range of SW ICM device
 * memory and create the matching SW_ICM general object in firmware.
 *
 * @dm:     per-device DM state: allocation bitmaps plus the spinlock
 *          guarding them
 * @type:   MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM or
 *          MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM; any other value
 *          returns -EINVAL
 * @length: requested size in bytes; rounded up here to whole ICM blocks.
 *          NOTE(review): log_sw_icm_size is programmed as ilog2(length),
 *          so the caller appears expected to pass a power-of-two length —
 *          confirm against callers.
 * @uid:    user context id stamped into the command header
 * @addr:   out parameter - device address of the start of the range
 * @obj_id: out parameter - firmware object id of the created SW_ICM object
 *
 * Return: 0 on success, -EINVAL for an unknown @type, -ENOMEM when no
 * contiguous run of free blocks is available, or the error from
 * mlx5_cmd_exec() (in which case the reserved blocks are released again).
 */
int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			  u16 uid, phys_addr_t *addr, u32 *obj_id)
{
	struct mlx5_core_dev *dev = dm->dev;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
	unsigned long *block_map;
	u64 icm_start_addr;
	u32 log_icm_size;
	u32 num_blocks;
	u32 max_blocks;
	u64 block_idx;
	void *sw_icm;
	int ret;

	/* Common CREATE_GENERAL_OBJECT header for a SW_ICM object. */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	/*
	 * Each ICM pool (steering vs. header-modify) has its own base
	 * address, size capability and allocation bitmap.
	 */
	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
						steering_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
					header_modify_sw_icm_start_address);
		log_icm_size = MLX5_CAP_DEV_MEM(dev,
						log_header_modify_sw_icm_size);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

	/* Round the byte length up to whole ICM blocks. */
	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
	/* Total number of blocks in this pool. */
	max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
	spin_lock(&dm->lock);
	/*
	 * Find a contiguous run of num_blocks free blocks and claim it
	 * while still holding the lock, so concurrent allocators cannot
	 * grab the same range.
	 */
	block_idx = bitmap_find_next_zero_area(block_map,
					       max_blocks,
					       0,
					       num_blocks, 0);

	if (block_idx < max_blocks)
		bitmap_set(block_map,
			   block_idx, num_blocks);

	spin_unlock(&dm->lock);

	/* bitmap_find_next_zero_area() returns >= max_blocks on failure. */
	if (block_idx >= max_blocks)
		return -ENOMEM;

	sw_icm = MLX5_ADDR_OF(create_sw_icm_in, in, sw_icm);
	/* Translate the block index into a device address within the pool. */
	icm_start_addr += block_idx << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
	MLX5_SET64(sw_icm, sw_icm, sw_icm_start_addr,
		   icm_start_addr);
	MLX5_SET(sw_icm, sw_icm, log_sw_icm_size, ilog2(length));

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret) {
		/* Firmware rejected the object: release the claimed blocks. */
		spin_lock(&dm->lock);
		bitmap_clear(block_map,
			     block_idx, num_blocks);
		spin_unlock(&dm->lock);

		return ret;
	}

	*addr = icm_start_addr;
	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}

/*
 * mlx5_cmd_dealloc_sw_icm() - destroy a SW_ICM general object in firmware
 * and return its blocks to the per-pool allocation bitmap.
 *
 * @dm:     per-device DM state (bitmaps + lock)
 * @type:   pool the range was allocated from; must match the type used at
 *          allocation time, otherwise -EINVAL
 * @length: size in bytes of the original allocation; rounded up to blocks
 *          the same way as in mlx5_cmd_alloc_sw_icm()
 * @uid:    user context id stamped into the command header
 * @addr:   device address returned by mlx5_cmd_alloc_sw_icm()
 * @obj_id: firmware object id returned by mlx5_cmd_alloc_sw_icm()
 *
 * The bitmap is only cleared after firmware confirms the destroy, so a
 * failed command leaves the range marked as allocated.
 *
 * Return: 0 on success, -EINVAL for an unknown @type, or the error from
 * mlx5_cmd_exec().
 */
int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			    u16 uid, phys_addr_t addr, u32 obj_id)
{
	struct mlx5_core_dev *dev = dm->dev;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	unsigned long *block_map;
	u32 num_blocks;
	u64 start_idx;
	int err;

	/* Same byte-length -> block-count rounding as the alloc path. */
	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);

	/*
	 * Recover the first block index by subtracting the pool's base
	 * address and converting back to block units.
	 */
	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		start_idx =
			(addr - MLX5_CAP64_DEV_MEM(
					dev, steering_sw_icm_start_address)) >>
			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
		block_map = dm->steering_sw_icm_alloc_blocks;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		start_idx =
			(addr -
			 MLX5_CAP64_DEV_MEM(
				 dev, header_modify_sw_icm_start_address)) >>
			MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
		block_map = dm->header_modify_sw_icm_alloc_blocks;
		break;
	default:
		return -EINVAL;
	}

	/* DESTROY_GENERAL_OBJECT command for the SW_ICM object. */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_SW_ICM);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, uid, uid);

	err =  mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* Firmware destroy succeeded: free the blocks for reuse. */
	spin_lock(&dm->lock);
	bitmap_clear(block_map,
		     start_idx, num_blocks);
	spin_unlock(&dm->lock);

	return 0;
}

int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+0 −4
Original line number Diff line number Diff line
@@ -65,8 +65,4 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
			     u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port);
int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			  u16 uid, phys_addr_t *addr, u32 *obj_id);
int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
			    u16 uid, phys_addr_t addr, u32 obj_id);
#endif /* MLX5_IB_CMD_H */
+30 −72
Original line number Diff line number Diff line
@@ -2280,6 +2280,7 @@ static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
			return -EOPNOTSUPP;
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (!capable(CAP_SYS_RAWIO) ||
		    !capable(CAP_NET_RAW))
			return -EPERM;
@@ -2344,18 +2345,18 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
				  struct uverbs_attr_bundle *attrs,
				  int type)
{
	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
	u64 act_size;
	int err;

	/* Allocation size must a multiple of the basic block size
	 * and a power of 2.
	 */
	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	act_size = roundup_pow_of_two(act_size);

	dm->size = act_size;
	err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
	err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
				   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
				   &dm->icm_dm.obj_id);
	if (err)
@@ -2365,9 +2366,9 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &dm->dev_addr, sizeof(dm->dev_addr));
	if (err)
		mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
					to_mucontext(ctx)->devx_uid,
					dm->dev_addr, dm->icm_dm.obj_id);
		mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
				       to_mucontext(ctx)->devx_uid, dm->dev_addr,
				       dm->icm_dm.obj_id);

	return err;
}
@@ -2407,8 +2408,14 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
					    attrs);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		err = handle_alloc_dm_sw_icm(context, dm,
					     attr, attrs,
					     MLX5_SW_ICM_TYPE_STEERING);
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
		err = handle_alloc_dm_sw_icm(context, dm,
					     attr, attrs,
					     MLX5_SW_ICM_TYPE_HEADER_MODIFY);
		break;
	default:
		err = -EOPNOTSUPP;
@@ -2428,6 +2435,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
	struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
	struct mlx5_ib_dm *dm = to_mdm(ibdm);
	u32 page_idx;
@@ -2439,18 +2447,22 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
		if (ret)
			return ret;

		page_idx = (dm->dev_addr -
			    pci_resource_start(dm_db->dev->pdev, 0) -
			    MLX5_CAP64_DEV_MEM(dm_db->dev,
					       memic_bar_start_addr)) >>
		page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
			    MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
			    PAGE_SHIFT;
		bitmap_clear(ctx->dm_pages, page_idx,
			     DIV_ROUND_UP(dm->size, PAGE_SIZE));
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
					     dm->size, ctx->devx_uid, dm->dev_addr,
					     dm->icm_dm.obj_id);
		if (ret)
			return ret;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
					      ctx->devx_uid, dm->dev_addr,
		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
					     dm->size, ctx->devx_uid, dm->dev_addr,
					     dm->icm_dm.obj_id);
		if (ret)
			return ret;
@@ -6096,8 +6108,6 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,

static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	mlx5_ib_cleanup_multiport_master(dev);
	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		srcu_barrier(&dev->mr_srcu);
@@ -6105,29 +6115,11 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
	}

	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));

	WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
		!bitmap_empty(
			dev->dm.steering_sw_icm_alloc_blocks,
			BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
			    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));

	kfree(dev->dm.steering_sw_icm_alloc_blocks);

	WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
		!bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
			      BIT(MLX5_CAP_DEV_MEM(
					  mdev, log_header_modify_sw_icm_size) -
				  MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));

	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
}

static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 header_modify_icm_blocks = 0;
	u64 steering_icm_blocks = 0;
	int err;
	int i;

@@ -6174,51 +6166,17 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
		if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
			steering_icm_blocks =
				BIT(MLX5_CAP_DEV_MEM(mdev,
						     log_steering_sw_icm_size) -
				    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));

			dev->dm.steering_sw_icm_alloc_blocks =
				kcalloc(BITS_TO_LONGS(steering_icm_blocks),
					sizeof(unsigned long), GFP_KERNEL);
			if (!dev->dm.steering_sw_icm_alloc_blocks)
				goto err_mp;
		}

		if (MLX5_CAP64_DEV_MEM(mdev,
				       header_modify_sw_icm_start_address)) {
			header_modify_icm_blocks = BIT(
				MLX5_CAP_DEV_MEM(
					mdev, log_header_modify_sw_icm_size) -
				MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));

			dev->dm.header_modify_sw_icm_alloc_blocks =
				kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
					sizeof(unsigned long), GFP_KERNEL);
			if (!dev->dm.header_modify_sw_icm_alloc_blocks)
				goto err_dm;
		}
	}

	spin_lock_init(&dev->dm.lock);
	dev->dm.dev = mdev;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = init_srcu_struct(&dev->mr_srcu);
		if (err)
			goto err_dm;
			goto err_mp;
	}

	return 0;

err_dm:
	kfree(dev->dm.steering_sw_icm_alloc_blocks);
	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);

err_mp:
	mlx5_ib_cleanup_multiport_master(dev);

+0 −2
Original line number Diff line number Diff line
@@ -881,8 +881,6 @@ struct mlx5_dm {
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
	unsigned long *steering_sw_icm_alloc_blocks;
	unsigned long *header_modify_sw_icm_alloc_blocks;
};

struct mlx5_read_counters_attr {
+1 −1
Original line number Diff line number Diff line
@@ -15,7 +15,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
		health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
		transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
		fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
		lib/devcom.o lib/pci_vsc.o diag/fs_tracepoint.o \
		lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
		diag/fw_tracer.o diag/crdump.o devlink.o

#
Loading