include/linux/if_bridge.h  +1 −1

@@ -103,7 +103,7 @@ struct __fdb_entry {
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 #endif
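The one-liner above turns br_should_route_hook_t from a pointer-to-function typedef into a plain function typedef. With the old typedef, the unchanged declaration "extern br_should_route_hook_t __rcu *br_should_route_hook;" would name a pointer to a function pointer; with the function typedef it names a single function pointer, with the '*' (and the __rcu annotation on it) spelled out at the declaration. A minimal standalone sketch of the two typedef styles — hook_ptr_t, hook_fn_t and demo_hook are made-up names for illustration, not part of the patch:

#include <stdio.h>

/* Old style: the typedef itself is already a pointer type. */
typedef int (*hook_ptr_t)(int);

/* New style: the typedef is a function type; the '*' stays visible
 * at each declaration, which is where an annotation such as the
 * kernel's __rcu attaches. */
typedef int hook_fn_t(int);

static int demo_hook(int x)
{
        return x * 2;
}

int main(void)
{
        hook_ptr_t a = demo_hook;       /* pointer hidden inside the typedef */
        hook_fn_t *b = demo_hook;       /* pointer spelled out explicitly    */

        printf("%d %d\n", a(21), b(21));
        return 0;
}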
include/linux/netfilter/x_tables.h  +5 −5

@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
  * necessary for reading the counters.
  */
 struct xt_info_lock {
-        spinlock_t lock;
+        seqlock_t lock;
         unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);

@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
         local_bh_disable();
         lock = &__get_cpu_var(xt_info_locks);
         if (likely(!lock->readers++))
-                spin_lock(&lock->lock);
+                write_seqlock(&lock->lock);
 }

 static inline void xt_info_rdunlock_bh(void)

@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
         struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

         if (likely(!--lock->readers))
-                spin_unlock(&lock->lock);
+                write_sequnlock(&lock->lock);
         local_bh_enable();
 }

@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-        spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+        write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }

 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-        spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+        write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }

 /*
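The per-cpu xt_info_lock becomes a seqlock_t so that the packet path (which still takes the lock exclusively via xt_info_rdlock_bh/xt_info_wrlock) now also bumps a sequence count, and a counter reader on another CPU can take a consistent snapshot of the 64-bit bcnt/pcnt pair without blocking bottom halves: it re-reads until the sequence was even and unchanged. The sketch below is a simplified userspace analogy of that read-retry pattern, not kernel code; seq_counter, write_update and read_snapshot are made-up names, the writer-exclusion spinlock inside the kernel's seqlock_t is omitted, and the memory-ordering details are deliberately simplified:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct seq_counter {
        atomic_uint seq;        /* even = idle, odd = write in progress */
        uint64_t bcnt, pcnt;    /* payload mirroring xt_counters */
};

static void write_update(struct seq_counter *c, uint64_t b, uint64_t p)
{
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_acquire); /* -> odd */
        c->bcnt += b;
        c->pcnt += p;
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_release); /* -> even */
}

static void read_snapshot(struct seq_counter *c, uint64_t *b, uint64_t *p)
{
        unsigned int start;

        /* Retry until the sequence is even and unchanged across the copy,
         * i.e. no writer ran while we read bcnt/pcnt. */
        do {
                start = atomic_load_explicit(&c->seq, memory_order_acquire);
                *b = c->bcnt;
                *p = c->pcnt;
        } while ((start & 1) ||
                 start != atomic_load_explicit(&c->seq, memory_order_acquire));
}

int main(void)
{
        struct seq_counter c;
        uint64_t b, p;

        atomic_init(&c.seq, 0);
        c.bcnt = c.pcnt = 0;

        write_update(&c, 1500, 1);
        read_snapshot(&c, &b, &p);
        printf("bcnt=%llu pcnt=%llu\n",
               (unsigned long long)b, (unsigned long long)p);
        return 0;
}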
net/ipv4/netfilter/arp_tables.c  +14 −31

@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
         struct arpt_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */

         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }

 static struct xt_counters *alloc_counters(const struct xt_table *table)

@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
          * about).
          */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);

@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
         struct arpt_entry *iter;

         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
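The switch from vmalloc() to vzalloc() goes hand in hand with the get_counters() rewrite: the old code seeded counters[] with SET_COUNTER() for the current CPU and only added the remaining CPUs, while the new loop is accumulate-only, so the buffer must start zeroed. A minimal userspace analogy of that allocation change, using malloc/calloc as stand-ins for vmalloc()/vzalloc() (xt_counters_demo is a made-up type for illustration):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct xt_counters_demo { unsigned long long pcnt, bcnt; };

int main(void)
{
        size_t n = 4;
        struct xt_counters_demo *a, *b;

        a = malloc(n * sizeof(*a));             /* vmalloc(): contents undefined */
        if (a)
                memset(a, 0, n * sizeof(*a));   /* explicit zeroing needed       */

        b = calloc(n, sizeof(*b));              /* vzalloc(): already zeroed     */

        if (a && b)
                printf("a[0].bcnt=%llu b[0].bcnt=%llu\n", a[0].bcnt, b[0].bcnt);
        free(a);
        free(b);
        return 0;
}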
net/ipv4/netfilter/ip_tables.c  +14 −31

@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
         struct ipt_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU.
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */

         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i; /* macro does multi eval of i */
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }

 static struct xt_counters *alloc_counters(const struct xt_table *table)

@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);

@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
         struct ipt_entry *iter;

         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
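The retained "/* macro does multi eval of i */" comment explains why the loop keeps "++i" on its own line instead of writing counters[i++] inside ADD_COUNTER(): if the macro names its first argument more than once, the increment would run more than once. The real ADD_COUNTER() definition is not part of this diff; ADD_COUNTER_DEMO below is a hypothetical stand-in that deliberately exhibits the hazard:

#include <stdio.h>

struct cnt { unsigned long long bcnt, pcnt; };

/* Hypothetical macro: (c) is expanded (and thus evaluated) twice. */
#define ADD_COUNTER_DEMO(c, b, p)               \
        do {                                    \
                (c).bcnt += (b);                \
                (c).pcnt += (p);                \
        } while (0)

int main(void)
{
        struct cnt counters[2] = { {0, 0}, {0, 0} };
        unsigned int i = 0;

        /* counters[i++] is evaluated once per expansion of (c),
         * so i ends up at 2 and the update is split across two slots. */
        ADD_COUNTER_DEMO(counters[i++], 100, 1);

        printf("i=%u counters[0].bcnt=%llu counters[1].pcnt=%llu\n",
               i, counters[0].bcnt, counters[1].pcnt);
        return 0;
}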
net/ipv6/netfilter/ip6_tables.c  +14 −31

@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
         struct ip6t_entry *iter;
         unsigned int cpu;
         unsigned int i;
-        unsigned int curcpu = get_cpu();
-
-        /* Instead of clearing (by a previous call to memset())
-         * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         *
-         * Bottom half has to be disabled to prevent deadlock
-         * if new softirq were to run and call ipt_do_table
-         */
-        local_bh_disable();
-        i = 0;
-        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-                SET_COUNTER(counters[i], iter->counters.bcnt,
-                            iter->counters.pcnt);
-                ++i;
-        }
-        local_bh_enable();
-        /* Processing counters from other cpus, we can let bottom half enabled,
-         * (preemption is disabled)
-         */

         for_each_possible_cpu(cpu) {
-                if (cpu == curcpu)
-                        continue;
+                seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
                 i = 0;
-                local_bh_disable();
-                xt_info_wrlock(cpu);
                 xt_entry_foreach(iter, t->entries[cpu], t->size) {
-                        ADD_COUNTER(counters[i], iter->counters.bcnt,
-                                    iter->counters.pcnt);
+                        u64 bcnt, pcnt;
+                        unsigned int start;
+
+                        do {
+                                start = read_seqbegin(lock);
+                                bcnt = iter->counters.bcnt;
+                                pcnt = iter->counters.pcnt;
+                        } while (read_seqretry(lock, start));
+
+                        ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
                 }
-                xt_info_wrunlock(cpu);
-                local_bh_enable();
         }
-        put_cpu();
 }

 static struct xt_counters *alloc_counters(const struct xt_table *table)

@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc(countersize);
+        counters = vzalloc(countersize);
         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);

@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
         struct ip6t_entry *iter;

         ret = 0;
-        counters = vmalloc(num_counters * sizeof(struct xt_counters));
+        counters = vzalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
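All three get_counters() variants (arp_tables, ip_tables, ip6_tables) now share the same shape: iterate every possible CPU, snapshot each rule's bcnt/pcnt under the seqlock retry loop, and accumulate into the zeroed output array. The standalone sketch below shows only that accumulation shape; NCPUS, NENTRIES and the flat arrays are made-up stand-ins for the kernel's per-cpu table copies, and the seqlock snapshot is elided:

#include <stdio.h>

#define NCPUS           2
#define NENTRIES        3

struct cnt { unsigned long long bcnt, pcnt; };

int main(void)
{
        /* one rule-counter array per CPU, as xtables keeps per-cpu copies */
        struct cnt percpu[NCPUS][NENTRIES] = {
                { {1000, 10}, {2000, 20}, {3000, 30} },
                { { 500,  5}, { 600,  6}, { 700,  7} },
        };
        struct cnt totals[NENTRIES] = { {0, 0}, {0, 0}, {0, 0} }; /* "vzalloc": zeroed */
        unsigned int cpu, i;

        for (cpu = 0; cpu < NCPUS; cpu++)
                for (i = 0; i < NENTRIES; i++) {
                        totals[i].bcnt += percpu[cpu][i].bcnt;
                        totals[i].pcnt += percpu[cpu][i].pcnt;
                }

        for (i = 0; i < NENTRIES; i++)
                printf("rule %u: bytes=%llu packets=%llu\n",
                       i, totals[i].bcnt, totals[i].pcnt);
        return 0;
}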