include/linux/netfilter/ipset/ip_set_getport.h  +2 −0

@@ -22,7 +22,9 @@ static inline bool ip_set_proto_with_ports(u8 proto)
 {
 	switch (proto) {
 	case IPPROTO_TCP:
+	case IPPROTO_SCTP:
 	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
 		return true;
 	}
 	return false;
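The two added cases matter because every protocol this helper now accepts (TCP, SCTP, UDP, UDP-Lite) starts its transport header with a 16-bit source/destination port pair, so port-based set types can treat them uniformly. As a rough, stand-alone illustration of that contract (not kernel code): the helper body is copied from the header above, while the caller, its names and the sample header bytes are invented for the demo, and the IPPROTO_* constants are taken from <netinet/in.h> as found in modern glibc.

/* Stand-alone sketch: the protocols ip_set_proto_with_ports() accepts all
 * carry src/dst ports in the first four bytes of the transport header, so a
 * caller can gate port extraction on the helper.  Illustration only.
 */
#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline bool ip_set_proto_with_ports(uint8_t proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_SCTP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		return true;
	}
	return false;
}

/* Hypothetical caller: read the destination port only when one exists. */
static bool get_dst_port(uint8_t proto, const uint8_t *l4hdr, uint16_t *port)
{
	if (!ip_set_proto_with_ports(proto))
		return false;				/* e.g. ICMP: no ports */
	*port = (uint16_t)(l4hdr[2] << 8 | l4hdr[3]);	/* bytes 2-3: dst port */
	return true;
}

int main(void)
{
	const uint8_t udplite_hdr[8] = { 0x00, 0x35, 0x00, 0x35, 0, 0, 0, 0 };
	uint16_t port;

	if (get_dst_port(IPPROTO_UDPLITE, udplite_hdr, &port))
		printf("UDP-Lite dst port: %u\n", port);	/* prints 53 */
	if (!get_dst_port(IPPROTO_ICMP, udplite_hdr, &port))
		printf("ICMP: no ports to extract\n");
	return 0;
}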
include/linux/netfilter/x_tables.h  +42 −54

@@ -456,72 +456,60 @@ extern void xt_proto_fini(struct net *net, u_int8_t af);

 extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
 extern void xt_free_table_info(struct xt_table_info *info);

-/*
- * Per-CPU spinlock associated with per-cpu table entries, and
- * with a counter for the "reading" side that allows a recursive
- * reader to avoid taking the lock and deadlocking.
- *
- * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
- * It needs to ensure that the rules are not being changed while the packet
- * is being processed. In some cases, the read lock will be acquired
- * twice on the same CPU; this is okay because of the count.
- *
- * "writing" is used when reading counters.
- *  During replace any readers that are using the old tables have to complete
- *  before freeing the old table. This is handled by the write locking
- *  necessary for reading the counters.
+/**
+ * xt_recseq - recursive seqcount for netfilter use
+ *
+ * Packet processing changes the seqcount only if no recursion happened
+ * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
+ * because we use the normal seqcount convention :
+ * Low order bit set to 1 if a writer is active.
  */
-struct xt_info_lock {
-	seqlock_t lock;
-	unsigned char readers;
-};
-DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
+DECLARE_PER_CPU(seqcount_t, xt_recseq);

-/*
- * Note: we need to ensure that preemption is disabled before acquiring
- * the per-cpu-variable, so we do it as a two step process rather than
- * using "spin_lock_bh()".
- *
- * We _also_ need to disable bottom half processing before updating our
- * nesting count, to make sure that the only kind of re-entrancy is this
- * code being called by itself: since the count+lock is not an atomic
- * operation, we can allow no races.
+/**
+ * xt_write_recseq_begin - start of a write section
  *
- * _Only_ that special combination of being per-cpu and never getting
- * re-entered asynchronously means that the count is safe.
+ * Begin packet processing : all readers must wait the end
+ * 1) Must be called with preemption disabled
+ * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * Returns :
+ *  1 if no recursion on this cpu
+ *  0 if recursion detected
  */
-static inline void xt_info_rdlock_bh(void)
+static inline unsigned int xt_write_recseq_begin(void)
 {
-	struct xt_info_lock *lock;
+	unsigned int addend;

-	local_bh_disable();
-	lock = &__get_cpu_var(xt_info_locks);
-	if (likely(!lock->readers++))
-		write_seqlock(&lock->lock);
-}
-
-static inline void xt_info_rdunlock_bh(void)
-{
-	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
-
-	if (likely(!--lock->readers))
-		write_sequnlock(&lock->lock);
-	local_bh_enable();
-}
+	/*
+	 * Low order bit of sequence is set if we already
+	 * called xt_write_recseq_begin().
+	 */
+	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

-/*
- * The "writer" side needs to get exclusive access to the lock,
- * regardless of readers.  This must be called with bottom half
- * processing (and thus also preemption) disabled.
- */
-static inline void xt_info_wrlock(unsigned int cpu)
-{
-	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
+	/*
+	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1
+	 * We dont check addend value to avoid a test and conditional jump,
+	 * since addend is most likely 1
+	 */
+	__this_cpu_add(xt_recseq.sequence, addend);
+	smp_wmb();
+
+	return addend;
 }

-static inline void xt_info_wrunlock(unsigned int cpu)
+/**
+ * xt_write_recseq_end - end of a write section
+ * @addend: return value from previous xt_write_recseq_begin()
+ *
+ * End packet processing : all readers can proceed
+ * 1) Must be called with preemption disabled
+ * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ */
+static inline void xt_write_recseq_end(unsigned int addend)
 {
-	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
+	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
+	smp_wmb();
+	__this_cpu_add(xt_recseq.sequence, addend);
 }

 /*
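The crux of the new header is recursion handling: as the removed comment noted, the rule-processing path can be entered twice on the same CPU, so xt_write_recseq_begin() looks at the low bit of the per-cpu sequence and adds 1 only on the outermost entry (0 on a nested one). Below is a minimal, single-threaded model of just that addend arithmetic, in plain C with no per-cpu storage and no smp_wmb(), so it illustrates the bookkeeping rather than the kernel's memory-ordering guarantees.

/* Single-threaded model of the xt_write_recseq_begin()/_end() addend trick:
 * the low bit of the sequence says whether this "cpu" is already inside a
 * write section, so a recursive entry adds 0 and the outermost entry adds 1.
 * No barriers, no per-cpu data: arithmetic only.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int recseq;	/* models one cpu's xt_recseq.sequence */

static unsigned int write_recseq_begin(void)
{
	/* 1 if the sequence is currently even (no writer), 0 if already odd */
	unsigned int addend = (recseq + 1) & 1;

	recseq += addend;
	return addend;
}

static void write_recseq_end(unsigned int addend)
{
	recseq += addend;
}

static void process_packet(int depth)
{
	unsigned int addend = write_recseq_begin();

	printf("depth %d: addend=%u sequence=%u\n", depth, addend, recseq);
	assert(recseq & 1);		/* readers see "writer active" */

	if (depth < 2)			/* pretend a target re-enters the table */
		process_packet(depth + 1);

	write_recseq_end(addend);
}

int main(void)
{
	process_packet(0);
	printf("final sequence=%u\n", recseq);
	assert(!(recseq & 1));		/* back to even: one full increment pair */
	return 0;
}

A nested call leaves the sequence odd for the whole outer section and advances it by exactly 2 overall, which is what lets the counter readers rely on the ordinary even/odd seqcount convention.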
net/ipv4/netfilter/arp_tables.c  +11 −7

@@ -260,6 +260,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	void *table_base;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;

 	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
 		return NF_DROP;

@@ -267,7 +268,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	indev = in ? in->name : nulldevname;
 	outdev = out ? out->name : nulldevname;

-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	table_base = private->entries[smp_processor_id()];

@@ -338,7 +340,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 			/* Verdict */
 			break;
 	} while (!acpar.hotdrop);
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();

 	if (acpar.hotdrop)
 		return NF_DROP;

@@ -712,7 +715,7 @@ static void get_counters(const struct xt_table_info *t,
 	unsigned int i;

 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);

 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {

@@ -720,10 +723,10 @@ static void get_counters(const struct xt_table_info *t,
 			unsigned int start;

 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));

 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;

@@ -1115,6 +1118,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct arpt_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;

@@ -1171,12 +1175,12 @@ static int do_add_counters(struct net *net, const void __user *user,
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
 unlock_up_free:
 	local_bh_enable();
 	xt_table_unlock(t);
net/ipv4/netfilter/ip_tables.c  +12 −16

@@ -68,15 +68,6 @@ void *ipt_alloc_initial_table(const struct xt_table *info)
 }
 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

-/*
-   We keep a set of rules for each CPU, so we can avoid write-locking
-   them in the softirq when updating the counters and therefore
-   only need to read-lock in the softirq; doing a write_lock_bh() in user
-   context stops packets coming through and allows user context to read
-   the counters or update the rules.
-
-   Hence the start of any table is given by get_table() below.  */
-
 /* Returns whether matches rule or not. */
 /* Performance critical - called for every packet */
 static inline bool

@@ -311,6 +302,7 @@ ipt_do_table(struct sk_buff *skb,
 	unsigned int *stackptr, origptr, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;

 	/* Initialization */
 	ip = ip_hdr(skb);

@@ -331,7 +323,8 @@ ipt_do_table(struct sk_buff *skb,
 	acpar.hooknum = hook;

 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu = smp_processor_id();
 	table_base = private->entries[cpu];

@@ -430,7 +423,9 @@ ipt_do_table(struct sk_buff *skb,
 	pr_debug("Exiting %s; resetting sp from %u to %u\n",
 		 __func__, *stackptr, origptr);
 	*stackptr = origptr;
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
 #else

@@ -886,7 +881,7 @@ get_counters(const struct xt_table_info *t,
 	unsigned int i;

 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);

 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {

@@ -894,10 +889,10 @@ get_counters(const struct xt_table_info *t,
 			unsigned int start;

 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));

 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */

@@ -1312,6 +1307,7 @@ do_add_counters(struct net *net, const void __user *user,
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct ipt_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;

@@ -1368,12 +1364,12 @@ do_add_counters(struct net *net, const void __user *user,
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
 unlock_up_free:
 	local_bh_enable();
 	xt_table_unlock(t);
net/ipv6/netfilter/ip6_tables.c  +12 −7

@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
 	unsigned int *stackptr, origptr, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;

 	/* Initialization */
 	indev = in ? in->name : nulldevname;

@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,

 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu = smp_processor_id();
 	table_base = private->entries[cpu];

@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
 	} while (!acpar.hotdrop);

 	*stackptr = origptr;
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;

@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
 	unsigned int i;

 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);

 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {

@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
 			unsigned int start;

 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));

 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;

@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	int ret = 0;
 	const void *loc_cpu_entry;
 	struct ip6t_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;

@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	i = 0;
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	loc_cpu_entry = private->entries[curcpu];
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
 unlock_up_free:
 	local_bh_enable();
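All three get_counters() loops above now take their snapshots with read_seqcount_begin()/read_seqcount_retry() against the per-cpu xt_recseq, retrying whenever the sequence was odd or changed underneath them. The sketch below is a stand-alone user-space model of that retry loop with a single "packet path" writer thread; every name in it is invented for the demo, and the C11 fences only approximate the barriers provided by the kernel's seqcount helpers and smp_wmb().

/* User-space model of the get_counters() retry loop: snapshot a pair of
 * counters consistently while a "packet path" thread keeps bumping them
 * inside odd/even sequence sections.  Illustration only, not the kernel API.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;		/* models one cpu's xt_recseq  */
static _Atomic unsigned long long bcnt, pcnt;	/* models one rule's counters  */

static void counters_add(unsigned long long bytes)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);	/* odd  */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&bcnt, bytes, memory_order_relaxed);
	atomic_fetch_add_explicit(&pcnt, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);	/* even */
}

static void *packet_path(void *unused)
{
	(void)unused;
	for (int i = 0; i < 1000000; i++)
		counters_add(100);		/* every "packet" is 100 bytes */
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long long b, p;
	unsigned int start;

	pthread_create(&t, NULL, packet_path, NULL);

	/* Same shape as the do { ... } while (read_seqcount_retry()) loop. */
	do {
		do {	/* wait for an even (no writer) sequence value */
			start = atomic_load_explicit(&seq, memory_order_relaxed);
		} while (start & 1);
		atomic_thread_fence(memory_order_acquire);

		b = atomic_load_explicit(&bcnt, memory_order_relaxed);
		p = atomic_load_explicit(&pcnt, memory_order_relaxed);

		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&seq, memory_order_relaxed) != start);

	pthread_join(t, NULL);
	printf("snapshot: bcnt=%llu pcnt=%llu consistent=%s\n",
	       b, p, b == 100 * p ? "yes" : "no");
	return 0;
}

Any snapshot the loop accepts was read while the sequence was even and unchanged, so the byte and packet counts always belong to the same consistent state, which is the property user space relies on when it reads the per-cpu counters.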