
Commit fe1624cf authored by Rasesh Mody, committed by David S. Miller

bna: RX Filter Enhancements



Change Details:
 - Added bna_rx_ucast_listset() for a synchronous ucast list-add operation.
 - Clear mac->handle before adding it to free_q.
 - bnad_set_rx_mode() rewritten. bnad_set_rx_mode() adds the MACs in uc_list
   to the UCAM. If the list exceeds the maximum supported entries, DEFAULT
   mode is turned on. If the MCAM limit is exceeded, ALLMULTI mode is turned
   on. (This fallback policy is summarized in a sketch below.)
 - Clear the CF flags, check for the new mode and reprogram the Rx path.
 - Added bnad_set_rx_ucast_fltr() and bnad_set_rx_mcast_fltr().
 - Check for IFF_PROMISC to set the correct mode.

Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 20b298f5
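
The fallback policy described in the bullets above can be summarized as follows. This is a simplified, hypothetical sketch in plain C, not the driver code: the real implementation is bnad_set_rx_mode() and its new helpers in the bnad.c hunk further down, max_ucam/max_mcam stand in for the adapter limits reported by bna_attr() (num_ucmac/num_mcmac), and allocation or firmware failures (which trigger the same fallbacks in the real code) are ignored here.

/*
 * Simplified sketch of the RX filter fallback policy from the commit
 * message above.  Hypothetical standalone code, not the driver
 * implementation.
 */
enum rxmode {
	RXMODE_NONE	= 0,
	RXMODE_DEFAULT	= 1 << 0,	/* unknown unicast goes to the default function */
	RXMODE_ALLMULTI	= 1 << 1,	/* accept all multicast */
	RXMODE_PROMISC	= 1 << 2,	/* accept everything */
};

static unsigned int pick_rx_mode(int promisc, int allmulti,
				 int uc_count, int max_ucam,
				 int mc_count, int max_mcam)
{
	unsigned int mode = RXMODE_NONE;

	if (promisc)
		return RXMODE_PROMISC;		/* IFF_PROMISC overrides the lists */

	if (allmulti || mc_count > max_mcam)
		mode |= RXMODE_ALLMULTI;	/* mc_list does not fit in the MCAM */

	if (uc_count > max_ucam)
		mode |= RXMODE_DEFAULT;		/* uc_list does not fit in the UCAM */

	return mode;
}

For example, with 20 addresses in uc_list and a hypothetical UCAM limit of 16, the unicast list is abandoned and RXMODE_DEFAULT is set, which is the DEFAULT-mode fallback the commit message describes.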
drivers/net/ethernet/brocade/bna/bna.h  +8 −0
@@ -497,11 +497,17 @@ enum bna_cb_status
 bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
 		 void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+		     void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
 		 void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
 		     void (*cbfn)(struct bnad *, struct bna_rx *));
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+		    void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 		enum bna_rxmode bitmask,
@@ -509,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_disable(struct bna_rx *rx);
 /* ENET */
 
 /* API for RX */
drivers/net/ethernet/brocade/bna/bna_enet.c  +9 −1
@@ -1825,11 +1825,16 @@ static void
 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
 {
 	struct list_head *qe;
-	int i = 0;
+	int i;
 
+	i = 0;
 	list_for_each(qe, &ucam_mod->free_q)
 		i++;
 
+	i = 0;
+	list_for_each(qe, &ucam_mod->del_q)
+		i++;
+
 	ucam_mod->bna = NULL;
 }
 
@@ -1877,6 +1882,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
 	i = 0;
 	list_for_each(qe, &mcam_mod->free_q) i++;
 
+	i = 0;
+	list_for_each(qe, &mcam_mod->del_q) i++;
+
 	i = 0;
 	list_for_each(qe, &mcam_mod->free_handle_q) i++;
 
drivers/net/ethernet/brocade/bna/bna_tx_rx.c  +137 −1
@@ -915,6 +915,75 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
 	return BNA_CB_SUCCESS;
 }
 
+enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+		     void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
+	struct bna_rxf *rxf = &rx->rxf;
+	struct list_head list_head;
+	struct list_head *qe;
+	u8 *mcaddr;
+	struct bna_mac *mac, *del_mac;
+	int i;
+
+	/* Purge the pending_add_q */
+	while (!list_empty(&rxf->ucast_pending_add_q)) {
+		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+		bfa_q_qe_init(qe);
+		mac = (struct bna_mac *)qe;
+		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+	}
+
+	/* Schedule active_q entries for deletion */
+	while (!list_empty(&rxf->ucast_active_q)) {
+		bfa_q_deq(&rxf->ucast_active_q, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+
+		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+		memcpy(del_mac, mac, sizeof(*del_mac));
+		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+	}
+
+	/* Allocate nodes */
+	INIT_LIST_HEAD(&list_head);
+	for (i = 0, mcaddr = uclist; i < count; i++) {
+		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
+		if (mac == NULL)
+			goto err_return;
+		bfa_q_qe_init(&mac->qe);
+		memcpy(mac->addr, mcaddr, ETH_ALEN);
+		list_add_tail(&mac->qe, &list_head);
+		mcaddr += ETH_ALEN;
+	}
+
+	/* Add the new entries */
+	while (!list_empty(&list_head)) {
+		bfa_q_deq(&list_head, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+	}
+
+	rxf->cam_fltr_cbfn = cbfn;
+	rxf->cam_fltr_cbarg = rx->bna->bnad;
+	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+	return BNA_CB_SUCCESS;
+
+err_return:
+	while (!list_empty(&list_head)) {
+		bfa_q_deq(&list_head, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+	}
+
+	return BNA_CB_UCAST_CAM_FULL;
+}
+
 enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
 		     void (*cbfn)(struct bnad *, struct bna_rx *))
@@ -943,7 +1012,7 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
 
 		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
 
-		memcpy(del_mac, mac, sizeof(*mac));
+		memcpy(del_mac, mac, sizeof(*del_mac));
 		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
 		mac->handle = NULL;
 		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
@@ -987,6 +1056,49 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
 	return BNA_CB_MCAST_LIST_FULL;
 }
 
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+		    void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+	struct bna_rxf *rxf = &rx->rxf;
+	struct list_head *qe;
+	struct bna_mac *mac, *del_mac;
+	int need_hw_config = 0;
+
+	/* Purge all entries from pending_add_q */
+	while (!list_empty(&rxf->mcast_pending_add_q)) {
+		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+	}
+
+	/* Schedule all entries in active_q for deletion */
+	while (!list_empty(&rxf->mcast_active_q)) {
+		bfa_q_deq(&rxf->mcast_active_q, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+
+		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+		memcpy(del_mac, mac, sizeof(*del_mac));
+		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+		mac->handle = NULL;
+		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+		need_hw_config = 1;
+	}
+
+	if (need_hw_config) {
+		rxf->cam_fltr_cbfn = cbfn;
+		rxf->cam_fltr_cbarg = rx->bna->bnad;
+		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+		return;
+	}
+
+	if (cbfn)
+		(*cbfn)(rx->bna->bnad, rx);
+}
+
 void
 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
 {
@@ -2679,6 +2791,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
 	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
 }
 
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+	struct bna_rxf *rxf = &rx->rxf;
+
+	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+		rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+		rxf->vlan_strip_pending = true;
+		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+	}
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+	struct bna_rxf *rxf = &rx->rxf;
+
+	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+		rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+		rxf->vlan_strip_pending = true;
+		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+	}
+}
+
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
 		enum bna_rxmode bitmask,
drivers/net/ethernet/brocade/bna/bnad.c  +110 −53
@@ -2624,9 +2624,6 @@ bnad_stop(struct net_device *netdev)
 	bnad_destroy_tx(bnad, 0);
 	bnad_destroy_rx(bnad, 0);
 
-	/* These config flags are cleared in the hardware */
-	bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
-
 	/* Synchronize mailbox IRQ */
 	bnad_mbox_irq_sync(bnad);
 
@@ -2939,73 +2936,133 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 	return stats;
 }
 
-void
-bnad_set_rx_mode(struct net_device *netdev)
-{
-	struct bnad *bnad = netdev_priv(netdev);
-	u32	new_mask, valid_mask;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-
-	new_mask = valid_mask = 0;
-
-	if (netdev->flags & IFF_PROMISC) {
-		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
-			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
-			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
-			bnad->cfg_flags |= BNAD_CF_PROMISC;
-		}
-	} else {
-		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
-			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
-			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
-			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
-		}
-	}
-
-	if (netdev->flags & IFF_ALLMULTI) {
-		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
-			new_mask |= BNA_RXMODE_ALLMULTI;
-			valid_mask |= BNA_RXMODE_ALLMULTI;
-			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
-		}
-	} else {
-		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
-			new_mask &= ~BNA_RXMODE_ALLMULTI;
-			valid_mask |= BNA_RXMODE_ALLMULTI;
-			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
-		}
-	}
-
-	if (bnad->rx_info[0].rx == NULL)
-		goto unlock;
-
-	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
-
-	if (!netdev_mc_empty(netdev)) {
-		u8 *mcaddr_list;
-		int mc_count = netdev_mc_count(netdev);
-
-		/* Index 0 holds the broadcast address */
-		mcaddr_list =
-			kzalloc((mc_count + 1) * ETH_ALEN,
-				GFP_ATOMIC);
-		if (!mcaddr_list)
-			goto unlock;
-
-		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
-
-		/* Copy rest of the MC addresses */
-		bnad_netdev_mc_list_get(netdev, mcaddr_list);
-
-		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
-					mcaddr_list, NULL);
-
-		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
-		kfree(mcaddr_list);
-	}
-unlock:
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
+static void
+bnad_set_rx_ucast_fltr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int uc_count = netdev_uc_count(netdev);
+	enum bna_cb_status ret;
+	u8 *mac_list;
+	struct netdev_hw_addr *ha;
+	int entry;
+
+	if (netdev_uc_empty(bnad->netdev)) {
+		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+		return;
+	}
+
+	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
+		goto mode_default;
+
+	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+	if (mac_list == NULL)
+		goto mode_default;
+
+	entry = 0;
+	netdev_for_each_uc_addr(ha, netdev) {
+		memcpy(&mac_list[entry * ETH_ALEN],
+		       &ha->addr[0], ETH_ALEN);
+		entry++;
+	}
+
+	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
+			mac_list, NULL);
+	kfree(mac_list);
+
+	if (ret != BNA_CB_SUCCESS)
+		goto mode_default;
+
+	return;
+
+	/* ucast packets not in UCAM are routed to default function */
+mode_default:
+	bnad->cfg_flags |= BNAD_CF_DEFAULT;
+	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+}
+
+static void
+bnad_set_rx_mcast_fltr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int mc_count = netdev_mc_count(netdev);
+	enum bna_cb_status ret;
+	u8 *mac_list;
+
+	if (netdev->flags & IFF_ALLMULTI)
+		goto mode_allmulti;
+
+	if (netdev_mc_empty(netdev))
+		return;
+
+	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
+		goto mode_allmulti;
+
+	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+
+	if (mac_list == NULL)
+		goto mode_allmulti;
+
+	memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+	/* copy rest of the MCAST addresses */
+	bnad_netdev_mc_list_get(netdev, mac_list);
+	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+			mac_list, NULL);
+	kfree(mac_list);
+
+	if (ret != BNA_CB_SUCCESS)
+		goto mode_allmulti;
+
+	return;
+
+mode_allmulti:
+	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+	bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+}
+
+void
+bnad_set_rx_mode(struct net_device *netdev)
+{
+	struct bnad *bnad = netdev_priv(netdev);
+	enum bna_rxmode new_mode, mode_mask;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+
+	if (bnad->rx_info[0].rx == NULL) {
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		return;
+	}
+
+	/* clear bnad flags to update it with new settings */
+	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
+			BNAD_CF_ALLMULTI);
+
+	new_mode = 0;
+	if (netdev->flags & IFF_PROMISC) {
+		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
+		bnad->cfg_flags |= BNAD_CF_PROMISC;
+	} else {
+		bnad_set_rx_mcast_fltr(bnad);
+
+		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
+			new_mode |= BNA_RXMODE_ALLMULTI;
+
+		bnad_set_rx_ucast_fltr(bnad);
+
+		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
+			new_mode |= BNA_RXMODE_DEFAULT;
+	}
+
+	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
+			BNA_RXMODE_ALLMULTI;
+	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
+
+	if (bnad->cfg_flags & BNAD_CF_PROMISC)
+		bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+	else
+		bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
+
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
 
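
For context, and not part of this diff: bnad_set_rx_mode() is the driver's ndo_set_rx_mode hook. The network core calls that hook with the device's address-list lock held (netif_addr_lock_bh()), and the function itself takes bnad->bna_lock with IRQs disabled, which is why the new helpers above allocate their temporary MAC arrays with GFP_ATOMIC. Below is a minimal, hypothetical illustration of such a hookup; the driver's real bnad_netdev_ops table lives elsewhere in bnad.c and is unchanged by this patch.

#include <linux/netdevice.h>

void bnad_set_rx_mode(struct net_device *netdev);	/* the handler shown above */

/*
 * Hypothetical example of wiring an ndo_set_rx_mode handler into a
 * net_device_ops table; illustration only, not the driver's actual table.
 */
static const struct net_device_ops example_netdev_ops = {
	.ndo_set_rx_mode	= bnad_set_rx_mode,
};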

drivers/net/ethernet/brocade/bna/bnad.h  +2 −1
@@ -260,7 +260,8 @@ struct bnad_rx_unmap_q {
 #define	BNAD_CF_DIM_ENABLED		0x01	/* DIM */
 #define	BNAD_CF_PROMISC			0x02
 #define BNAD_CF_ALLMULTI		0x04
-#define	BNAD_CF_MSIX			0x08	/* If in MSIx mode */
+#define	BNAD_CF_DEFAULT			0x08
+#define	BNAD_CF_MSIX			0x10	/* If in MSIx mode */
 
 /* Defines for run_flags bit-mask */
 /* Set, tested & cleared using xxx_bit() functions */
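
The renumbering above exists because the new BNAD_CF_DEFAULT flag takes over bit 0x08, which pushes BNAD_CF_MSIX to 0x10. A standalone sanity sketch (not driver code) showing that the five cfg_flags bits remain distinct:

#include <assert.h>

#define BNAD_CF_DIM_ENABLED	0x01
#define BNAD_CF_PROMISC		0x02
#define BNAD_CF_ALLMULTI	0x04
#define BNAD_CF_DEFAULT		0x08	/* added by this patch */
#define BNAD_CF_MSIX		0x10	/* moved from 0x08 */

int main(void)
{
	unsigned int all = BNAD_CF_DIM_ENABLED | BNAD_CF_PROMISC |
			   BNAD_CF_ALLMULTI | BNAD_CF_DEFAULT | BNAD_CF_MSIX;

	assert(all == 0x1f);				/* five flags, five bits */
	assert((BNAD_CF_DEFAULT & BNAD_CF_MSIX) == 0);	/* no collision */
	return 0;
}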