Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f3f050a4 authored by David S. Miller
Browse files

Merge tag 'mlx5-updates-2019-04-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
mlx5-updates-2019-04-30

mlx5 misc updates:

1) Bodong Wang and Parav Pandit (6):
   - Remove unused mlx5_query_nic_vport_vlans
   - vport macros refactoring
   - Fix vport access in E-Switch
   - Use atomic rep state to serialize state change

2) Eli Britstein (2):
   - prio tag mode support, added ACLs and replace TC vlan pop with
     vlan 0 rewrite when prio tag mode is enabled.

3) Erez Alfasi (2):
   - ethtool: Add SFF-8436 and SFF-8636 max EEPROM length definitions
   - mlx5e: ethtool, Add support for EEPROM high pages query

4) Masahiro Yamada (1):
   - remove meaningless CFLAGS_tracepoint.o

5) Maxim Mikityanskiy (1):
   - Put the common XDP code into a function

6) Tariq Toukan (2):
   - Turn on HW tunnel offload in all TIRs

7) Vlad Buslov (1):
   - Return error when trying to insert existing flower filter
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 18af9626 6f4e0219
Loading
Loading
Loading
Loading
+11 −5
Original line number Original line Diff line number Diff line
@@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
							  ibdev->rep->vport);
							  ibdev->rep->vport);
			if (rep_ndev == ndev)
			if (rep_ndev == ndev)
				roce->netdev = ndev;
				roce->netdev = ndev;
		} else if (ndev->dev.parent == &mdev->pdev->dev) {
		} else if (ndev->dev.parent == mdev->device) {
			roce->netdev = ndev;
			roce->netdev = ndev;
		}
		}
		write_unlock(&roce->netdev_lock);
		write_unlock(&roce->netdev_lock);
@@ -4356,8 +4356,12 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
				 struct ib_event *ibev)
				 struct ib_event *ibev)
{
{
	u8 port = (eqe->data.port.port >> 4) & 0xf;

	switch (eqe->sub_type) {
	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
					    IB_LINK_LAYER_ETHERNET)
			schedule_work(&ibdev->delay_drop.delay_drop_work);
			schedule_work(&ibdev->delay_drop.delay_drop_work);
		break;
		break;
	default: /* do nothing */
	default: /* do nothing */
@@ -5675,7 +5679,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
			}
			}


			if (bound) {
			if (bound) {
				dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
				dev_dbg(mpi->mdev->device,
					"removing port from unaffiliated list.\n");
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				list_del(&mpi->list);
				list_del(&mpi->list);
				break;
				break;
@@ -5874,7 +5879,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
	dev->ib_dev.dev.parent		= &mdev->pdev->dev;
	dev->ib_dev.dev.parent		= mdev->device;


	mutex_init(&dev->cap_mask_mutex);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	INIT_LIST_HEAD(&dev->qp_list);
@@ -6563,7 +6568,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)


	if (!bound) {
	if (!bound) {
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
		dev_dbg(mdev->device,
			"no suitable IB device found to bind to, added to unaffiliated list.\n");
	}
	}
	mutex_unlock(&mlx5_ib_multiport_mutex);
	mutex_unlock(&mlx5_ib_multiport_mutex);


+1 −3
Original line number Original line Diff line number Diff line
@@ -36,7 +36,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu
#
#
# Core extra
# Core extra
#
#
mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o ecpf.o
mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS)      += lib/mpfs.o
mlx5_core-$(CONFIG_MLX5_MPFS)      += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN)          += lib/vxlan.o
mlx5_core-$(CONFIG_VXLAN)          += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
@@ -58,5 +58,3 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
				     en_accel/ipsec_stats.o
				     en_accel/ipsec_stats.o


mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o

CFLAGS_tracepoint.o := -I$(src)
+10 −9
Original line number Original line Diff line number Diff line
@@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   int node)
					   int node)
{
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_priv *priv = &dev->priv;
	struct device *device = dev->device;
	int original_node;
	int original_node;
	void *cpu_handle;
	void *cpu_handle;


	mutex_lock(&priv->alloc_mutex);
	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(&dev->pdev->dev);
	original_node = dev_to_node(device);
	set_dev_node(&dev->pdev->dev, node);
	set_dev_node(device, node);
	cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
	cpu_handle = dma_alloc_coherent(device, size, dma_handle,
					GFP_KERNEL);
					GFP_KERNEL);
	set_dev_node(&dev->pdev->dev, original_node);
	set_dev_node(device, original_node);
	mutex_unlock(&priv->alloc_mutex);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
	return cpu_handle;
}
}
@@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);


void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
{
	dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
	dma_free_coherent(dev->device, buf->size, buf->frags->buf,
			  buf->frags->map);
			  buf->frags->map);


	kfree(buf->frags);
	kfree(buf->frags);
@@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
		if (!frag->buf)
		if (!frag->buf)
			goto err_free_buf;
			goto err_free_buf;
		if (frag->map & ((1 << buf->page_shift) - 1)) {
		if (frag->map & ((1 << buf->page_shift) - 1)) {
			dma_free_coherent(&dev->pdev->dev, frag_sz,
			dma_free_coherent(dev->device, frag_sz,
					  buf->frags[i].buf, buf->frags[i].map);
					  buf->frags[i].buf, buf->frags[i].map);
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
				       &frag->map, buf->page_shift);
				       &frag->map, buf->page_shift);
@@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,


err_free_buf:
err_free_buf:
	while (i--)
	while (i--)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
		dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
				  buf->frags[i].map);
				  buf->frags[i].map);
	kfree(buf->frags);
	kfree(buf->frags);
err_out:
err_out:
@@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
	for (i = 0; i < buf->npages; i++) {
	for (i = 0; i < buf->npages; i++) {
		int frag_sz = min_t(int, size, PAGE_SIZE);
		int frag_sz = min_t(int, size, PAGE_SIZE);


		dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
		dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
				  buf->frags[i].map);
				  buf->frags[i].map);
		size -= frag_sz;
		size -= frag_sz;
	}
	}
@@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
	__set_bit(db->index, db->u.pgdir->bitmap);
	__set_bit(db->index, db->u.pgdir->bitmap);


	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
		dma_free_coherent(dev->device, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		list_del(&db->u.pgdir->list);
		bitmap_free(db->u.pgdir->bitmap);
		bitmap_free(db->u.pgdir->bitmap);
+4 −5
Original line number Original line Diff line number Diff line
@@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd *cmd = &dev->cmd;


	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev->priv.name);
		 dev_name(dev->device));
}
}


static void clean_debug_files(struct mlx5_core_dev *dev)
static void clean_debug_files(struct mlx5_core_dev *dev)
@@ -1852,7 +1852,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)


static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
{
	struct device *ddev = &dev->pdev->dev;
	struct device *ddev = dev->device;


	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
						&cmd->alloc_dma, GFP_KERNEL);
@@ -1883,7 +1883,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)


static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
{
	struct device *ddev = &dev->pdev->dev;
	struct device *ddev = dev->device;


	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
			  cmd->alloc_dma);
@@ -1908,8 +1908,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
		return -EINVAL;
		return -EINVAL;
	}
	}


	cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
	cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
				    0);
	if (!cmd->pool)
	if (!cmd->pool)
		return -ENOMEM;
		return -ENOMEM;


+3 −2
Original line number Original line Diff line number Diff line
@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
	TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
	TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),


	TP_STRUCT__entry(
	TP_STRUCT__entry(
		__string(dev_name, tracer->dev->priv.name)
		__string(dev_name, dev_name(tracer->dev->device))
		__field(u64, trace_timestamp)
		__field(u64, trace_timestamp)
		__field(bool, lost)
		__field(bool, lost)
		__field(u8, event_id)
		__field(u8, event_id)
@@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw,
	),
	),


	TP_fast_assign(
	TP_fast_assign(
		__assign_str(dev_name, tracer->dev->priv.name);
		__assign_str(dev_name,
			     dev_name(tracer->dev->device));
		__entry->trace_timestamp = trace_timestamp;
		__entry->trace_timestamp = trace_timestamp;
		__entry->lost = lost;
		__entry->lost = lost;
		__entry->event_id = event_id;
		__entry->event_id = event_id;
Loading