drivers/net/iseries_veth.c  (+23 −9)

@@ -924,7 +924,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
 	spin_lock_irqsave(&cnx->lock, flags);
 
-	if (! cnx->state & VETH_STATE_READY)
+	if (! (cnx->state & VETH_STATE_READY))
 		goto drop;
 
 	if ((skb->len - 14) > VETH_MAX_MTU)
@@ -1023,6 +1023,8 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	lpmask = veth_transmit_to_many(skb, lpmask, dev);
 
+	dev->trans_start = jiffies;
+
 	if (! lpmask) {
 		dev_kfree_skb(skb);
 	} else {
@@ -1262,13 +1264,18 @@ static void veth_receive(struct veth_lpar_connection *cnx,
 		vlan = skb->data[9];
 		dev = veth_dev[vlan];
-		if (! dev)
-			/* Some earlier versions of the driver sent
-			   broadcasts down all connections, even to lpars
-			   that weren't on the relevant vlan. So ignore
-			   packets belonging to a vlan we're not on. */
+		if (! dev) {
+			/*
+			 * Some earlier versions of the driver sent
+			 * broadcasts down all connections, even to lpars
+			 * that weren't on the relevant vlan. So ignore
+			 * packets belonging to a vlan we're not on.
+			 * We can also be here if we receive packets while
+			 * the driver is going down, because then dev is NULL.
+			 */
+			dev_kfree_skb_irq(skb);
 			continue;
+		}
 
 		port = (struct veth_port *)dev->priv;
 		dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
@@ -1381,18 +1388,25 @@ void __exit veth_module_cleanup(void)
 {
 	int i;
 
-	vio_unregister_driver(&veth_driver);
+	/* Stop the queues first to stop any new packets being sent. */
+	for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++)
+		if (veth_dev[i])
+			netif_stop_queue(veth_dev[i]);
 
+	/* Stop the connections before we unregister the driver. This
+	 * ensures there's no skbs lying around holding the device open. */
 	for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
 		veth_stop_connection(i);
 
 	HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
 
 	/* Hypervisor callbacks may have scheduled more work while we
-	 * were destroying connections. Now that we've disconnected from
+	 * were stopping connections. Now that we've disconnected from
 	 * the hypervisor make sure everything's finished. */
 	flush_scheduled_work();
 
+	vio_unregister_driver(&veth_driver);
+
 	for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
 		veth_destroy_connection(i);
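
The fix in the first hunk is worth spelling out: in C, logical NOT binds more tightly than bitwise AND, so the old test "! cnx->state & VETH_STATE_READY" is parsed as "(! cnx->state) & VETH_STATE_READY". For any non-zero state that expression is 0, so the not-ready bail-out could never trigger. A minimal user-space sketch of the two forms (the 0x0004 flag value is invented for illustration and is not the driver's real state bit):

/* precedence_demo.c - why "! x & FLAG" differs from "! (x & FLAG)". */
#include <stdio.h>

#define VETH_STATE_READY 0x0004		/* illustrative value only */

int main(void)
{
	unsigned long state = 0x0001;	/* some other bit set, READY clear */

	/* Buggy form: parsed as (!state) & VETH_STATE_READY, which is 0
	 * for any non-zero state, so this branch is never taken. */
	if (! state & VETH_STATE_READY)
		printf("buggy test: not ready\n");
	else
		printf("buggy test: ready (wrong - READY is not set)\n");

	/* Fixed form from the patch: mask first, then negate. */
	if (! (state & VETH_STATE_READY))
		printf("fixed test: not ready (correct)\n");
	else
		printf("fixed test: ready\n");

	return 0;
}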
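
The one-line addition in veth_start_xmit() also deserves a note: dev->trans_start records the time of the last transmission, and the networking core's TX watchdog compares it against jiffies to decide whether the queue has stalled, so drivers of this era update it on every hand-off. A sketch of that pattern, assuming a 2.6-style hard_start_xmit; example_start_xmit and its freeing policy are illustrative placeholders, not the driver's actual code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Hand the frame to the hardware (or, for iseries_veth, to the
	 * other partitions via the hypervisor) here. */

	dev->trans_start = jiffies;	/* timestamp for the TX watchdog */

	/* Once the driver has accepted the skb it owns it, so every path
	 * that does not keep a reference must free it. */
	dev_kfree_skb(skb);

	return 0;	/* 0 == NETDEV_TX_OK: skb consumed */
}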