Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ee625938 authored by David S. Miller
Browse files

Merge branch 'mlxsw-flooding-and-cosmetics'



Jiri Pirko says:

====================
mlxsw: driver update

This driver update mainly brings support for user to be able to setup
flooding on specified port, via bridge flag. Also, there is a fix in ageing
time conversion. The rest is just cosmetics.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 65bdc43d c7070fc4
Loading
Loading
Loading
Loading
+5 −2
Original line number Original line Diff line number Diff line
@@ -278,8 +278,8 @@ Flooding L2 domain
For a given L2 VLAN domain, the switch device should flood multicast/broadcast
For a given L2 VLAN domain, the switch device should flood multicast/broadcast
and unknown unicast packets to all ports in domain, if allowed by port's
and unknown unicast packets to all ports in domain, if allowed by port's
current STP state.  The switch driver, knowing which ports are within which
current STP state.  The switch driver, knowing which ports are within which
vlan L2 domain, can program the switch device for flooding.  The packet should
vlan L2 domain, can program the switch device for flooding.  The packet may
also be sent to the port netdev for processing by the bridge driver.  The
be sent to the port netdev for processing by the bridge driver.  The
bridge should not reflood the packet to the same ports the device flooded,
bridge should not reflood the packet to the same ports the device flooded,
otherwise there will be duplicate packets on the wire.
otherwise there will be duplicate packets on the wire.


@@ -298,6 +298,9 @@ packets up to the bridge driver for flooding. This is not ideal as the number
of ports scale in the L2 domain as the device is much more efficient at
of ports scale in the L2 domain as the device is much more efficient at
flooding packets than software.
flooding packets than software.


If supported by the device, flood control can be offloaded to it, preventing
certain netdevs from flooding unicast traffic for which there is no FDB entry.

IGMP Snooping
IGMP Snooping
^^^^^^^^^^^^^
^^^^^^^^^^^^^


+2 −2
Original line number Original line Diff line number Diff line
@@ -287,7 +287,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
	else
@@ -362,7 +362,7 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
	char *op_tlv;
	char *op_tlv;


	op_tlv = mlxsw_emad_op_tlv(skb);
	op_tlv = mlxsw_emad_op_tlv(skb);
	return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
}


#define MLXSW_EMAD_TIMEOUT_MS 200
#define MLXSW_EMAD_TIMEOUT_MS 200
+2 −1
Original line number Original line Diff line number Diff line
@@ -1662,8 +1662,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
							   CIR_OUT_PARAM_LO));
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
		}
	} else if (!err && out_mbox)
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}


	mutex_unlock(&mlxsw_pci->cmd.lock);
	mutex_unlock(&mlxsw_pci->cmd.lock);


+9 −9
Original line number Original line Diff line number Diff line
@@ -348,8 +348,9 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
		     MLXSW_REG_SFD_REC_LEN, 0x0C, false);
		     MLXSW_REG_SFD_REC_LEN, 0x0C, false);


/* reg_sfd_uc_sub_port
/* reg_sfd_uc_sub_port
 * LAG sub port.
 * VEPA channel on local port.
 * Must be 0 if multichannel VEPA is not enabled.
 * Valid only if local port is a non-stacking port. Must be 0 if multichannel
 * VEPA is not enabled.
 * Access: RW
 * Access: RW
 */
 */
MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
@@ -396,8 +397,7 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
	mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
	mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
}


static inline void
static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
					   char *mac, u16 *p_vid,
					   char *mac, u16 *p_vid,
					   u8 *p_local_port)
					   u8 *p_local_port)
{
{
@@ -474,7 +474,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
		       MLXSW_REG_SFN_REC_LEN, 0x02);
		       MLXSW_REG_SFN_REC_LEN, 0x02);


/* reg_sfd_mac_sub_port
/* reg_sfn_mac_sub_port
 * VEPA channel on the local port.
 * VEPA channel on the local port.
 * 0 if multichannel VEPA is not enabled.
 * 0 if multichannel VEPA is not enabled.
 * Access: RO
 * Access: RO
@@ -482,14 +482,14 @@ MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
		     MLXSW_REG_SFN_REC_LEN, 0x08, false);
		     MLXSW_REG_SFN_REC_LEN, 0x08, false);


/* reg_sfd_mac_fid
/* reg_sfn_mac_fid
 * Filtering identifier.
 * Filtering identifier.
 * Access: RO
 * Access: RO
 */
 */
MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
		     MLXSW_REG_SFN_REC_LEN, 0x08, false);
		     MLXSW_REG_SFN_REC_LEN, 0x08, false);


/* reg_sfd_mac_system_port
/* reg_sfn_mac_system_port
 * Unique port identifier for the final destination of the packet.
 * Unique port identifier for the final destination of the packet.
 * Access: RO
 * Access: RO
 */
 */
+3 −2
Original line number Original line Diff line number Diff line
@@ -1227,6 +1227,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->pvid = 1;
	mlxsw_sp_port->pvid = 1;


	mlxsw_sp_port->pcpu_stats =
	mlxsw_sp_port->pcpu_stats =
@@ -1899,12 +1900,12 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				if (err)
				if (err)
					netdev_err(dev, "Failed to join bridge\n");
					netdev_err(dev, "Failed to join bridge\n");
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_port->bridged = true;
				mlxsw_sp_port->bridged = 1;
			} else {
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				if (err)
				if (err)
					netdev_err(dev, "Failed to leave bridge\n");
					netdev_err(dev, "Failed to leave bridge\n");
				mlxsw_sp_port->bridged = false;
				mlxsw_sp_port->bridged = 0;
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
			}
		}
		}
Loading