drivers/vhost/vhost.c +6 −1

@@ -1036,7 +1036,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 /* This actually signals the guest, using eventfd. */
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-        __u16 flags = 0;
+        __u16 flags;
+        /* Flush out used index updates. This is paired
+         * with the barrier that the Guest executes when enabling
+         * interrupts. */
+        smp_mb();
+
         if (get_user(flags, &vq->avail->flags)) {
                 vq_err(vq, "Failed to get flags");
                 return;
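The smp_mb() added here pairs a store to the used ring index with a subsequent read of the guest's interrupt-enable flag. A minimal userspace sketch of that pairing, using C11 fences in place of the kernel's smp_mb(); the ring layout and function names are illustrative, not vhost's:

#include <stdatomic.h>
#include <stdbool.h>

struct ring {
        _Atomic unsigned short used_idx;        /* written by the host side */
        _Atomic unsigned short flags;           /* written by the guest side */
};

/* Host: publish a new used index, then decide whether to signal. */
static bool host_should_signal(struct ring *r, unsigned short new_used)
{
        atomic_store_explicit(&r->used_idx, new_used, memory_order_relaxed);
        /* Full barrier: the used-index store must be visible before we
         * read the guest's interrupt-enable flag (smp_mb()'s role above). */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&r->flags, memory_order_relaxed) == 0;
}

/* Guest: enable interrupts, then re-check for work published meanwhile. */
static bool guest_enable_irq_and_recheck(struct ring *r, unsigned short seen)
{
        atomic_store_explicit(&r->flags, 0, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the paired barrier */
        return atomic_load_explicit(&r->used_idx, memory_order_relaxed) != seen;
}

If either side omits its fence, the host can read a stale "interrupts disabled" flag while the guest reads a stale used index, and the signal is lost; this is the window the one-line fix closes.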
include/linux/if_link.h +19 −4

@@ -111,10 +111,7 @@ enum {
         IFLA_NET_NS_PID,
         IFLA_IFALIAS,
         IFLA_NUM_VF,            /* Number of VFs if device is SR-IOV PF */
-        IFLA_VF_MAC,            /* Hardware queue specific attributes */
-        IFLA_VF_VLAN,
-        IFLA_VF_TX_RATE,        /* TX Bandwidth Allocation */
-        IFLA_VFINFO,
+        IFLA_VFINFO_LIST,
         IFLA_STATS64,
         __IFLA_MAX
 };

@@ -236,6 +233,24 @@ enum macvlan_mode {

 /* SR-IOV virtual function managment section */

+enum {
+        IFLA_VF_INFO_UNSPEC,
+        IFLA_VF_INFO,
+        __IFLA_VF_INFO_MAX,
+};
+
+#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1)
+
+enum {
+        IFLA_VF_UNSPEC,
+        IFLA_VF_MAC,            /* Hardware queue specific attributes */
+        IFLA_VF_VLAN,
+        IFLA_VF_TX_RATE,        /* TX Bandwidth Allocation */
+        __IFLA_VF_MAX,
+};
+
+#define IFLA_VF_MAX (__IFLA_VF_MAX - 1)
+
 struct ifla_vf_mac {
         __u32 vf;
         __u8 mac[32]; /* MAX_ADDR_LEN */
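The net effect of these enums is a two-level nest: the old flat top-level IFLA_VF_* attributes move under a per-VF IFLA_VF_INFO, which in turn sits inside IFLA_VFINFO_LIST. A sketch of the resulting wire layout and a sanity check on the payload sizes, assuming the field layouts this patch defines and uses (illustrative, not captured traffic):

/* RTM_NEWLINK
 *   IFLA_NUM_VF           (u32: number of VFs)
 *   IFLA_VFINFO_LIST      (NLA_NESTED)
 *     IFLA_VF_INFO        (NLA_NESTED, repeated once per VF)
 *       IFLA_VF_MAC       (struct ifla_vf_mac)
 *       IFLA_VF_VLAN      (struct ifla_vf_vlan)
 *       IFLA_VF_TX_RATE   (struct ifla_vf_tx_rate)
 */
#include <linux/if_link.h>

/* C11 _Static_assert; sizes follow from __u32/__u8 fields only. */
_Static_assert(sizeof(struct ifla_vf_mac) == 36, "vf(4) + mac[32]");
_Static_assert(sizeof(struct ifla_vf_vlan) == 12, "vf + vlan + qos");
_Static_assert(sizeof(struct ifla_vf_tx_rate) == 8, "vf + rate");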
include/net/tcp.h +3 −18

@@ -1206,30 +1206,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk,
 extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
 extern void tcp_free_md5sig_pool(void);

-extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
-extern void __tcp_put_md5sig_pool(void);
+extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+extern void tcp_put_md5sig_pool(void);
+
 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
                                  unsigned header_len);
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
                             struct tcp_md5sig_key *key);

-static inline struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
-{
-        int cpu = get_cpu();
-        struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
-        if (!ret)
-                put_cpu();
-        return ret;
-}
-
-static inline void tcp_put_md5sig_pool(void)
-{
-        __tcp_put_md5sig_pool();
-        put_cpu();
-}
-
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
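The replacement functions keep the same get/put pairing but move the CPU pinning into the implementation (see net/ipv4/tcp.c below). A hedged sketch of the caller-side contract, using only functions declared in this header; the hashing sequence is schematic, not a copy of any in-tree caller:

#include <net/tcp.h>

/* On success the caller runs with BH (and hence preemption) disabled
 * until the matching tcp_put_md5sig_pool(); no sleeping in between. */
static int example_md5_hash(struct tcphdr *th, struct tcp_md5sig_key *key)
{
        struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
        int err = -1;

        if (!hp)                /* pool unavailable: nothing to release */
                return -1;

        if (tcp_md5_hash_header(hp, th))
                goto out;
        if (tcp_md5_hash_key(hp, key))
                goto out;
        err = 0;
out:
        tcp_put_md5sig_pool();  /* re-enables BH */
        return err;
}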
net/core/rtnetlink.c +110 −49

@@ -644,12 +644,19 @@ static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
         memcpy(v, &a, sizeof(a));
 }

+/* All VF info */
 static inline int rtnl_vfinfo_size(const struct net_device *dev)
 {
-        if (dev->dev.parent && dev_is_pci(dev->dev.parent))
-                return dev_num_vf(dev->dev.parent) *
-                        sizeof(struct ifla_vf_info);
-        else
+        if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+                int num_vfs = dev_num_vf(dev->dev.parent);
+                size_t size = nlmsg_total_size(sizeof(struct nlattr));
+                size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
+                size += num_vfs * (sizeof(struct ifla_vf_mac) +
+                                   sizeof(struct ifla_vf_vlan) +
+                                   sizeof(struct ifla_vf_tx_rate));
+                return size;
+        } else
                 return 0;
 }

@@ -672,7 +679,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
                + nla_total_size(1) /* IFLA_OPERSTATE */
                + nla_total_size(1) /* IFLA_LINKMODE */
                + nla_total_size(4) /* IFLA_NUM_VF */
-               + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+               + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
                + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
 }
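To make the new size estimate concrete, here is a small standalone program reproducing the same arithmetic for two VFs. The macros are re-derived from their uapi definitions and the 36/12/8-byte payload figures are the illustrative struct sizes from the header above; this is a sketch of the calculation, not kernel code:

#include <stdio.h>

#define NLMSG_ALIGNTO           4U
#define NLMSG_ALIGN(len)        (((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
#define NLMSG_HDRLEN            16      /* NLMSG_ALIGN(sizeof(struct nlmsghdr)) */
#define nlmsg_total_size(p)     NLMSG_ALIGN(NLMSG_HDRLEN + (p))

int main(void)
{
        int num_vfs = 2;
        size_t nlattr_sz = 4;                           /* sizeof(struct nlattr) */

        size_t size = nlmsg_total_size(nlattr_sz);      /* IFLA_VFINFO_LIST header */
        size += nlmsg_total_size(num_vfs * nlattr_sz);  /* per-VF IFLA_VF_INFO headers */
        size += num_vfs * (36 + 12 + 8);                /* mac + vlan + tx_rate payloads */

        printf("reserved for %d VFs: %zu bytes\n", num_vfs, size);
        return 0;
}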
@@ -749,14 +756,37 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
         if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
                 int i;
-                struct ifla_vf_info ivi;

-                NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
-                for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+                struct nlattr *vfinfo, *vf;
+                int num_vfs = dev_num_vf(dev->dev.parent);
+
+                NLA_PUT_U32(skb, IFLA_NUM_VF, num_vfs);
+                vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
+                if (!vfinfo)
+                        goto nla_put_failure;
+                for (i = 0; i < num_vfs; i++) {
+                        struct ifla_vf_info ivi;
+                        struct ifla_vf_mac vf_mac;
+                        struct ifla_vf_vlan vf_vlan;
+                        struct ifla_vf_tx_rate vf_tx_rate;
+
                         if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
                                 break;
-                        NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+                        vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+                        memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
+                        vf_vlan.vlan = ivi.vlan;
+                        vf_vlan.qos = ivi.qos;
+                        vf_tx_rate.rate = ivi.tx_rate;
+                        vf = nla_nest_start(skb, IFLA_VF_INFO);
+                        if (!vf) {
+                                nla_nest_cancel(skb, vfinfo);
+                                goto nla_put_failure;
+                        }
+                        NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
+                        NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
+                        NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+                                &vf_tx_rate);
+                        nla_nest_end(skb, vf);
                 }
+                nla_nest_end(skb, vfinfo);
         }
         if (dev->rtnl_link_ops) {
                 if (rtnl_link_fill(skb, dev) < 0)

@@ -818,12 +848,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
         [IFLA_LINKINFO]         = { .type = NLA_NESTED },
         [IFLA_NET_NS_PID]       = { .type = NLA_U32 },
         [IFLA_IFALIAS]          = { .type = NLA_STRING, .len = IFALIASZ-1 },
-        [IFLA_VF_MAC]           = { .type = NLA_BINARY,
-                                    .len = sizeof(struct ifla_vf_mac) },
-        [IFLA_VF_VLAN]          = { .type = NLA_BINARY,
-                                    .len = sizeof(struct ifla_vf_vlan) },
-        [IFLA_VF_TX_RATE]       = { .type = NLA_BINARY,
-                                    .len = sizeof(struct ifla_vf_tx_rate) },
+        [IFLA_VFINFO_LIST]      = {. type = NLA_NESTED },
 };
 EXPORT_SYMBOL(ifla_policy);

@@ -832,6 +857,19 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
         [IFLA_INFO_DATA]        = { .type = NLA_NESTED },
 };

+static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+        [IFLA_VF_INFO]          = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+        [IFLA_VF_MAC]           = { .type = NLA_BINARY,
+                                    .len = sizeof(struct ifla_vf_mac) },
+        [IFLA_VF_VLAN]          = { .type = NLA_BINARY,
+                                    .len = sizeof(struct ifla_vf_vlan) },
+        [IFLA_VF_TX_RATE]       = { .type = NLA_BINARY,
+                                    .len = sizeof(struct ifla_vf_tx_rate) },
+};
+
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 {
         struct net *net;

@@ -861,6 +899,52 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
         return 0;
 }

+static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+{
+        int rem, err = -EINVAL;
+        struct nlattr *vf;
+        const struct net_device_ops *ops = dev->netdev_ops;
+
+        nla_for_each_nested(vf, attr, rem) {
+                switch (nla_type(vf)) {
+                case IFLA_VF_MAC: {
+                        struct ifla_vf_mac *ivm;
+                        ivm = nla_data(vf);
+                        err = -EOPNOTSUPP;
+                        if (ops->ndo_set_vf_mac)
+                                err = ops->ndo_set_vf_mac(dev, ivm->vf,
+                                                          ivm->mac);
+                        break;
+                }
+                case IFLA_VF_VLAN: {
+                        struct ifla_vf_vlan *ivv;
+                        ivv = nla_data(vf);
+                        err = -EOPNOTSUPP;
+                        if (ops->ndo_set_vf_vlan)
+                                err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+                                                           ivv->vlan,
+                                                           ivv->qos);
+                        break;
+                }
+                case IFLA_VF_TX_RATE: {
+                        struct ifla_vf_tx_rate *ivt;
+                        ivt = nla_data(vf);
+                        err = -EOPNOTSUPP;
+                        if (ops->ndo_set_vf_tx_rate)
+                                err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+                                                              ivt->rate);
+                        break;
+                }
+                default:
+                        err = -EINVAL;
+                        break;
+                }
+                if (err)
+                        break;
+        }
+        return err;
+}
+
 static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                       struct nlattr **tb, char *ifname, int modified)
 {

@@ -991,40 +1075,17 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                 write_unlock_bh(&dev_base_lock);
         }

-        if (tb[IFLA_VF_MAC]) {
-                struct ifla_vf_mac *ivm;
-                ivm = nla_data(tb[IFLA_VF_MAC]);
-                err = -EOPNOTSUPP;
-                if (ops->ndo_set_vf_mac)
-                        err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
-                if (err < 0)
-                        goto errout;
-                modified = 1;
-        }
-
-        if (tb[IFLA_VF_VLAN]) {
-                struct ifla_vf_vlan *ivv;
-                ivv = nla_data(tb[IFLA_VF_VLAN]);
-                err = -EOPNOTSUPP;
-                if (ops->ndo_set_vf_vlan)
-                        err = ops->ndo_set_vf_vlan(dev, ivv->vf,
-                                                   ivv->vlan, ivv->qos);
-                if (err < 0)
-                        goto errout;
-                modified = 1;
-        }
-        err = 0;
-
-        if (tb[IFLA_VF_TX_RATE]) {
-                struct ifla_vf_tx_rate *ivt;
-                ivt = nla_data(tb[IFLA_VF_TX_RATE]);
-                err = -EOPNOTSUPP;
-                if (ops->ndo_set_vf_tx_rate)
-                        err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
-                if (err < 0)
-                        goto errout;
-                modified = 1;
-        }
-        err = 0;
+        if (tb[IFLA_VFINFO_LIST]) {
+                struct nlattr *attr;
+                int rem;
+                nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+                        if (nla_type(attr) != IFLA_VF_INFO)
+                                goto errout;
+                        err = do_setvfinfo(dev, attr);
+                        if (err < 0)
+                                goto errout;
+                        modified = 1;
+                }
+        }
+        err = 0;
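do_setvfinfo() leans on the kernel's nla_for_each_nested() to walk one IFLA_VF_INFO nest. For readers without the kernel helpers at hand, a hedged userspace equivalent that open-codes the same iteration over a validated nest; the ATTR_DATA/ATTR_NEXT macros are invented for this sketch:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/if_link.h>

#define ATTR_DATA(a)    ((const char *)(a) + NLA_HDRLEN)
#define ATTR_NEXT(a)    ((const struct nlattr *)((const char *)(a) + \
                         NLA_ALIGN((a)->nla_len)))

static void walk_vf_info(const struct nlattr *vf_info)
{
        int rem = vf_info->nla_len - NLA_HDRLEN;
        const struct nlattr *a = (const struct nlattr *)ATTR_DATA(vf_info);

        /* Same termination test nla_for_each_nested() performs. */
        while (rem >= (int)sizeof(*a) && a->nla_len >= sizeof(*a) &&
               (int)a->nla_len <= rem) {
                switch (a->nla_type & NLA_TYPE_MASK) {
                case IFLA_VF_MAC: {
                        const struct ifla_vf_mac *ivm =
                                (const void *)ATTR_DATA(a);
                        printf("vf %u  mac %02x:%02x:%02x:...\n", ivm->vf,
                               ivm->mac[0], ivm->mac[1], ivm->mac[2]);
                        break;
                }
                case IFLA_VF_VLAN: {
                        const struct ifla_vf_vlan *ivv =
                                (const void *)ATTR_DATA(a);
                        printf("vf %u  vlan %u  qos %u\n",
                               ivv->vf, ivv->vlan, ivv->qos);
                        break;
                }
                }
                rem -= NLA_ALIGN(a->nla_len);
                a = ATTR_NEXT(a);
        }
}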
net/ipv4/tcp.c +24 −10

@@ -2840,7 +2840,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
                         if (p->md5_desc.tfm)
                                 crypto_free_hash(p->md5_desc.tfm);
                         kfree(p);
-                        p = NULL;
                 }
         }
         free_percpu(pool);

@@ -2938,25 +2937,40 @@ retry:
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ *      tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ *      We use percpu structure, so if we succeed, we exit with preemption
+ *      and BH disabled, to make sure another thread or softirq handling
+ *      wont try to get same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
         struct tcp_md5sig_pool * __percpu *p;
-        spin_lock_bh(&tcp_md5sig_pool_lock);
+
+        local_bh_disable();
+
+        spin_lock(&tcp_md5sig_pool_lock);
         p = tcp_md5sig_pool;
         if (p)
                 tcp_md5sig_users++;
-        spin_unlock_bh(&tcp_md5sig_pool_lock);
-        return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+        spin_unlock(&tcp_md5sig_pool_lock);
+
+        if (p)
+                return *per_cpu_ptr(p, smp_processor_id());

-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+        local_bh_enable();
+        return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);

-void __tcp_put_md5sig_pool(void)
+void tcp_put_md5sig_pool(void)
 {
+        local_bh_enable();
         tcp_free_md5sig_pool();
 }
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);

 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, struct tcphdr *th)
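Why disable BH for the whole window rather than just pin the CPU, as the old get_cpu()-based wrapper did: per the new docstring, the per-CPU crypto context is also reachable from softirq on the same CPU. A schematic interleaving of the corruption this prevents; the trace below is illustrative, not a real kernel log:

/*
 *   process context (CPU 0)              softirq (same CPU 0)
 *   -----------------------              --------------------
 *   hp = tcp_get_md5sig_pool();
 *   tcp_md5_hash_header(hp, th);
 *            <-- hardirq arrives, softirq runs on return -->
 *                                        hp2 = same per-CPU slot
 *                                        tcp_md5_hash_header(hp2, th2);
 *                                        ... finishes, state left behind
 *   tcp_md5_hash_key(hp, key);   <-- digest now mixed with hp2's bytes
 *
 * With preemption off but BH on (the old scheme), this interleaving is
 * possible. Holding local_bh_disable() from get to put makes the whole
 * window atomic with respect to softirqs on this CPU, and the early
 * local_bh_enable() in the failure path keeps the pairing balanced.
 */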